Diffstat (limited to 'drivers/base')
29 files changed, 2065 insertions, 796 deletions
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 527d291706e8..6b2a84e7f2be 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_REGMAP)	+= regmap/
 obj-$(CONFIG_SOC_BUS) += soc.o
 obj-$(CONFIG_PINCTRL) += pinctrl.o
 obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
+obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/base.h b/drivers/base/base.h
index fd3347d9f153..1782f3aa386e 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -63,7 +63,7 @@ struct driver_private {
 *	binding of drivers which were unable to get all the resources needed by
 *	the device; typically because it depends on another driver getting
 *	probed first.
- * @device - pointer back to the struct class that this structure is
+ * @device - pointer back to the struct device that this structure is
 * associated with.
 *
 * Nothing outside of the driver core should ever touch these fields.
@@ -134,6 +134,7 @@ extern int devres_release_all(struct device *dev);
 
 /* /sys/devices directory */
 extern struct kset *devices_kset;
+extern void devices_kset_move_last(struct device *dev);
 
 #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
 extern void module_add_driver(struct module *mod, struct device_driver *drv);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index dafae6d2f7ac..334ec7ef1960 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -534,6 +534,52 @@ static DEVICE_ATTR_RO(dev);
 struct kset *devices_kset;
 
 /**
+ * devices_kset_move_before - Move device in the devices_kset's list.
+ * @deva: Device to move.
+ * @devb: Device @deva should come before.
+ */
+static void devices_kset_move_before(struct device *deva, struct device *devb)
+{
+	if (!devices_kset)
+		return;
+	pr_debug("devices_kset: Moving %s before %s\n",
+		 dev_name(deva), dev_name(devb));
+	spin_lock(&devices_kset->list_lock);
+	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
+	spin_unlock(&devices_kset->list_lock);
+}
+
+/**
+ * devices_kset_move_after - Move device in the devices_kset's list.
+ * @deva: Device to move
+ * @devb: Device @deva should come after.
+ */
+static void devices_kset_move_after(struct device *deva, struct device *devb)
+{
+	if (!devices_kset)
+		return;
+	pr_debug("devices_kset: Moving %s after %s\n",
+		 dev_name(deva), dev_name(devb));
+	spin_lock(&devices_kset->list_lock);
+	list_move(&deva->kobj.entry, &devb->kobj.entry);
+	spin_unlock(&devices_kset->list_lock);
+}
+
+/**
+ * devices_kset_move_last - move the device to the end of devices_kset's list.
+ * @dev: device to move
+ */
+void devices_kset_move_last(struct device *dev)
+{
+	if (!devices_kset)
+		return;
+	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
+	spin_lock(&devices_kset->list_lock);
+	list_move_tail(&dev->kobj.entry, &devices_kset->list);
+	spin_unlock(&devices_kset->list_lock);
+}
+
+/**
 * device_create_file - create sysfs attribute file for device.
 * @dev: device.
 * @attr: device attribute descriptor.
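For illustration, a hedged sketch of a caller that exercises the ordering machinery above: device_move() (patched further down in core.c) takes a dpm_order and, with these hunks, now mirrors the chosen order into devices_kset as well. The function name example_adopt_child is invented.

#include <linux/device.h>

/*
 * Hypothetical sketch: reparent @child under @parent. With this patch,
 * DPM_ORDER_DEV_AFTER_PARENT also moves @child after @parent in
 * devices_kset, so device_shutdown(), which walks that list in reverse,
 * quiesces the child before its new parent.
 */
static int example_adopt_child(struct device *child, struct device *parent)
{
	return device_move(child, parent, DPM_ORDER_DEV_AFTER_PARENT);
}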
@@ -662,6 +708,9 @@ void device_initialize(struct device *dev)
 	INIT_LIST_HEAD(&dev->devres_head);
 	device_pm_init(dev);
 	set_dev_node(dev, -1);
+#ifdef CONFIG_GENERIC_MSI_IRQ
+	INIT_LIST_HEAD(&dev->msi_list);
+#endif
 }
 EXPORT_SYMBOL_GPL(device_initialize);
 
@@ -1252,6 +1301,19 @@ void device_unregister(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(device_unregister);
 
+static struct device *prev_device(struct klist_iter *i)
+{
+	struct klist_node *n = klist_prev(i);
+	struct device *dev = NULL;
+	struct device_private *p;
+
+	if (n) {
+		p = to_device_private_parent(n);
+		dev = p->device;
+	}
+	return dev;
+}
+
 static struct device *next_device(struct klist_iter *i)
 {
 	struct klist_node *n = klist_next(i);
@@ -1341,6 +1403,36 @@ int device_for_each_child(struct device *parent, void *data,
 EXPORT_SYMBOL_GPL(device_for_each_child);
 
 /**
+ * device_for_each_child_reverse - device child iterator in reversed order.
+ * @parent: parent struct device.
+ * @fn: function to be called for each device.
+ * @data: data for the callback.
+ *
+ * Iterate over @parent's child devices, and call @fn for each,
+ * passing it @data.
+ *
+ * We check the return of @fn each time. If it returns anything
+ * other than 0, we break out and return that value.
+ */
+int device_for_each_child_reverse(struct device *parent, void *data,
+				  int (*fn)(struct device *dev, void *data))
+{
+	struct klist_iter i;
+	struct device *child;
+	int error = 0;
+
+	if (!parent->p)
+		return 0;
+
+	klist_iter_init(&parent->p->klist_children, &i);
+	while ((child = prev_device(&i)) && !error)
+		error = fn(child, data);
+	klist_iter_exit(&i);
+	return error;
+}
+EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
+
+/**
 * device_find_child - device iterator for locating a particular device.
 * @parent: parent struct device
 * @match: Callback function to check device
@@ -1923,12 +2015,15 @@ int device_move(struct device *dev, struct device *new_parent,
 		break;
 	case DPM_ORDER_DEV_AFTER_PARENT:
 		device_pm_move_after(dev, new_parent);
+		devices_kset_move_after(dev, new_parent);
 		break;
 	case DPM_ORDER_PARENT_BEFORE_DEV:
 		device_pm_move_before(new_parent, dev);
+		devices_kset_move_before(new_parent, dev);
 		break;
 	case DPM_ORDER_DEV_LAST:
 		device_pm_move_last(dev);
+		devices_kset_move_last(dev);
 		break;
 	}
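A hedged usage sketch for the reverse iterator added above; both example_* names are invented.

#include <linux/device.h>

/* Callback: invoked once per child, newest first. */
static int example_quiesce_one(struct device *dev, void *data)
{
	dev_dbg(dev, "quiescing child\n");
	return 0;	/* any non-zero value stops the iteration */
}

/* Quiesce children in the reverse of their registration order. */
static void example_quiesce_children(struct device *parent)
{
	device_for_each_child_reverse(parent, NULL, example_quiesce_one);
}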
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 78720e706176..91bbb1959d8d 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -41,7 +41,7 @@ static void change_cpu_under_node(struct cpu *cpu,
 	cpu->node_id = to_nid;
 }
 
-static int __ref cpu_subsys_online(struct device *dev)
+static int cpu_subsys_online(struct device *dev)
 {
 	struct cpu *cpu = container_of(dev, struct cpu, dev);
 	int cpuid = dev->id;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index a638bbb1a27a..be0eb4639128 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -304,6 +304,14 @@ static int really_probe(struct device *dev, struct device_driver *drv)
 			goto probe_failed;
 	}
 
+	/*
+	 * Ensure devices are listed in devices_kset in the correct order.
+	 * It's important to move the device to the end of devices_kset
+	 * before calling .probe, because probing can be recursive and the
+	 * parent device should always come first.
+	 */
+	devices_kset_move_last(dev);
+
 	if (dev->bus->probe) {
 		ret = dev->bus->probe(dev);
 		if (ret)
@@ -399,6 +407,8 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe);
 *
 * This function must be called with @dev lock held.  When called for a
 * USB interface, @dev->parent lock must be held as well.
+ *
+ * If the device has a parent, runtime-resume the parent before driver probing.
 */
 int driver_probe_device(struct device_driver *drv, struct device *dev)
 {
@@ -410,10 +420,16 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
 	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
 		 drv->bus->name, __func__, dev_name(dev), drv->name);
 
+	if (dev->parent)
+		pm_runtime_get_sync(dev->parent);
+
 	pm_runtime_barrier(dev);
 	ret = really_probe(dev, drv);
 	pm_request_idle(dev);
 
+	if (dev->parent)
+		pm_runtime_put(dev->parent);
+
 	return ret;
 }
 
@@ -507,11 +523,17 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
 
 	device_lock(dev);
 
+	if (dev->parent)
+		pm_runtime_get_sync(dev->parent);
+
 	bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
 	dev_dbg(dev, "async probe completed\n");
 
 	pm_request_idle(dev);
 
+	if (dev->parent)
+		pm_runtime_put(dev->parent);
+
 	device_unlock(dev);
 
 	put_device(dev);
@@ -541,6 +563,9 @@ static int __device_attach(struct device *dev, bool allow_async)
 			.want_async = false,
 		};
 
+		if (dev->parent)
+			pm_runtime_get_sync(dev->parent);
+
 		ret = bus_for_each_drv(dev->bus, NULL, &data,
 					__device_attach_driver);
 		if (!ret && allow_async && data.have_async) {
@@ -557,6 +582,9 @@ static int __device_attach(struct device *dev, bool allow_async)
 		} else {
 			pm_request_idle(dev);
 		}
+
+		if (dev->parent)
+			pm_runtime_put(dev->parent);
 	}
 out_unlock:
 	device_unlock(dev);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index c8a53d1e019f..875464690117 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -297,10 +297,10 @@ void * devres_get(struct device *dev, void *new_res,
 	if (!dr) {
 		add_dr(dev, &new_dr->node);
 		dr = new_dr;
-		new_dr = NULL;
+		new_res = NULL;
 	}
 	spin_unlock_irqrestore(&dev->devres_lock, flags);
-	devres_free(new_dr);
+	devres_free(new_res);
 	return dr->data;
 }
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 894bda114224..8524450e75bd 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -443,7 +443,7 @@ static int fw_add_devm_name(struct device *dev, const char *name)
 		return -ENOMEM;
 	fwn->name = kstrdup_const(name, GFP_KERNEL);
 	if (!fwn->name) {
-		kfree(fwn);
+		devres_free(fwn);
 		return -ENOMEM;
 	}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 31df474d72f4..560751bad294 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -392,6 +392,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
 	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
 		int page_nid;
 
+		/*
+		 * memory block could have several absent sections from start.
+		 * skip pfn range from absent section
+		 */
+		if (!pfn_present(pfn)) {
+			pfn = round_down(pfn + PAGES_PER_SECTION,
+					 PAGES_PER_SECTION) - 1;
+			continue;
+		}
+
 		page_nid = get_nid_for_pfn(pfn);
 		if (page_nid < 0)
 			continue;
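The dd.c hunks above keep a probing device's parent runtime-resumed for the duration of the probe. A hedged sketch of the same pattern as a driver might apply it around any child access; example_access_child is invented.

#include <linux/pm_runtime.h>

static int example_access_child(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);	/* power up the parent */

	/* ... touch the child's registers while the parent is awake ... */

	if (dev->parent)
		pm_runtime_put(dev->parent);		/* allow it to idle again */

	return 0;
}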
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
new file mode 100644
index 000000000000..1857a5dd0816
--- /dev/null
+++ b/drivers/base/platform-msi.c
@@ -0,0 +1,282 @@
+/*
+ * MSI framework for platform devices
+ *
+ * Copyright (C) 2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/slab.h>
+
+#define DEV_ID_SHIFT	24
+
+/*
+ * Internal data structure containing a (made up, but unique) devid
+ * and the callback to write the MSI message.
+ */
+struct platform_msi_priv_data {
+	irq_write_msi_msg_t	write_msg;
+	int			devid;
+};
+
+/* The devid allocator */
+static DEFINE_IDA(platform_msi_devid_ida);
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+/*
+ * Convert an msi_desc to a globally unique identifier (per-device
+ * devid + msi_desc position in the msi_list).
+ */
+static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
+{
+	u32 devid;
+
+	devid = desc->platform.msi_priv_data->devid;
+
+	return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index;
+}
+
+static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+	arg->desc = desc;
+	arg->hwirq = platform_msi_calc_hwirq(desc);
+}
+
+static int platform_msi_init(struct irq_domain *domain,
+			     struct msi_domain_info *info,
+			     unsigned int virq, irq_hw_number_t hwirq,
+			     msi_alloc_info_t *arg)
+{
+	struct irq_data *data;
+
+	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+				      info->chip, info->chip_data);
+
+	/*
+	 * Save the MSI descriptor in handler_data so that the
+	 * irq_write_msi_msg callback can retrieve it (and the
+	 * associated device).
+	 */
+	data = irq_domain_get_irq_data(domain, virq);
+	data->handler_data = arg->desc;
+
+	return 0;
+}
+#else
+#define platform_msi_set_desc		NULL
+#define platform_msi_init		NULL
+#endif
+
+static void platform_msi_update_dom_ops(struct msi_domain_info *info)
+{
+	struct msi_domain_ops *ops = info->ops;
+
+	BUG_ON(!ops);
+
+	if (ops->msi_init == NULL)
+		ops->msi_init = platform_msi_init;
+	if (ops->set_desc == NULL)
+		ops->set_desc = platform_msi_set_desc;
+}
+
+static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct msi_desc *desc = irq_data_get_irq_handler_data(data);
+	struct platform_msi_priv_data *priv_data;
+
+	priv_data = desc->platform.msi_priv_data;
+
+	priv_data->write_msg(desc, msg);
+}
+
+static void platform_msi_update_chip_ops(struct msi_domain_info *info)
+{
+	struct irq_chip *chip = info->chip;
+
+	BUG_ON(!chip);
+	if (!chip->irq_mask)
+		chip->irq_mask = irq_chip_mask_parent;
+	if (!chip->irq_unmask)
+		chip->irq_unmask = irq_chip_unmask_parent;
+	if (!chip->irq_eoi)
+		chip->irq_eoi = irq_chip_eoi_parent;
+	if (!chip->irq_set_affinity)
+		chip->irq_set_affinity = msi_domain_set_affinity;
+	if (!chip->irq_write_msi_msg)
+		chip->irq_write_msi_msg = platform_msi_write_msg;
+}
+
+static void platform_msi_free_descs(struct device *dev)
+{
+	struct msi_desc *desc, *tmp;
+
+	list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
+		list_del(&desc->list);
+		free_msi_entry(desc);
+	}
+}
+
+static int platform_msi_alloc_descs(struct device *dev, int nvec,
+				    struct platform_msi_priv_data *data)
+
+{
+	int i;
+
+	for (i = 0; i < nvec; i++) {
+		struct msi_desc *desc;
+
+		desc = alloc_msi_entry(dev);
+		if (!desc)
+			break;
+
+		desc->platform.msi_priv_data = data;
+		desc->platform.msi_index = i;
+		desc->nvec_used = 1;
+
+		list_add_tail(&desc->list, dev_to_msi_list(dev));
+	}
+
+	if (i != nvec) {
+		/* Clean up the mess */
+		platform_msi_free_descs(dev);
+
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
+ * @np:		Optional device-tree node of the interrupt controller
+ * @info:	MSI domain info
+ * @parent:	Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a platform MSI
+ * interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
+						  struct msi_domain_info *info,
+						  struct irq_domain *parent)
+{
+	struct irq_domain *domain;
+
+	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+		platform_msi_update_dom_ops(info);
+	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+		platform_msi_update_chip_ops(info);
+
+	domain = msi_create_irq_domain(np, info, parent);
+	if (domain)
+		domain->bus_token = DOMAIN_BUS_PLATFORM_MSI;
+
+	return domain;
+}
+
+/**
+ * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
+ * @dev:		The device for which to allocate interrupts
+ * @nvec:		The number of interrupts to allocate
+ * @write_msi_msg:	Callback to write an interrupt message for @dev
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+				   irq_write_msi_msg_t write_msi_msg)
+{
+	struct platform_msi_priv_data *priv_data;
+	int err;
+
+	/*
+	 * Limit the number of interrupts to 256 per device. Should we
+	 * need to bump this up, DEV_ID_SHIFT should be adjusted
+	 * accordingly (which would impact the max number of MSI
+	 * capable devices).
+	 */
+	if (!dev->msi_domain || !write_msi_msg || !nvec ||
+	    nvec > (1 << (32 - DEV_ID_SHIFT)))
+		return -EINVAL;
+
+	if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
+		dev_err(dev, "Incompatible msi_domain, giving up\n");
+		return -EINVAL;
+	}
+
+	/* Already had a helping of MSI? Greed... */
+	if (!list_empty(dev_to_msi_list(dev)))
+		return -EBUSY;
+
+	priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
+	if (!priv_data)
+		return -ENOMEM;
+
+	priv_data->devid = ida_simple_get(&platform_msi_devid_ida,
+					  0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
+	if (priv_data->devid < 0) {
+		err = priv_data->devid;
+		goto out_free_data;
+	}
+
+	priv_data->write_msg = write_msi_msg;
+
+	err = platform_msi_alloc_descs(dev, nvec, priv_data);
+	if (err)
+		goto out_free_id;
+
+	err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
+	if (err)
+		goto out_free_desc;
+
+	return 0;
+
+out_free_desc:
+	platform_msi_free_descs(dev);
+out_free_id:
+	ida_simple_remove(&platform_msi_devid_ida, priv_data->devid);
+out_free_data:
+	kfree(priv_data);
+
+	return err;
+}
+
+/**
+ * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
+ * @dev:	The device for which to free interrupts
+ */
+void platform_msi_domain_free_irqs(struct device *dev)
+{
+	struct msi_desc *desc;
+
+	desc = first_msi_entry(dev);
+	if (desc) {
+		struct platform_msi_priv_data *data;
+
+		data = desc->platform.msi_priv_data;
+
+		ida_simple_remove(&platform_msi_devid_ida, data->devid);
+		kfree(data);
+	}
+
+	msi_domain_free_irqs(dev->msi_domain, dev);
+	platform_msi_free_descs(dev);
+}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 063f0ab15259..f80aaaf9f610 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -375,9 +375,7 @@ int platform_device_add(struct platform_device *pdev)
 	while (--i >= 0) {
 		struct resource *r = &pdev->resource[i];
-		unsigned long type = resource_type(r);
-
-		if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+		if (r->parent)
 			release_resource(r);
 	}
 
@@ -408,9 +406,7 @@ void platform_device_del(struct platform_device *pdev)
 		for (i = 0; i < pdev->num_resources; i++) {
 			struct resource *r = &pdev->resource[i];
-			unsigned long type = resource_type(r);
-
-			if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+			if (r->parent)
 				release_resource(r);
 		}
 	}
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index acef9f9f759a..652b5a367c1f 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -38,7 +38,7 @@ struct pm_clock_entry {
 * @dev: The device for the given clock
 * @ce: PM clock entry corresponding to the clock.
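A hedged consumer sketch for the platform-MSI entry points above, assuming the device's msi_domain has already been set up (for instance by firmware/DT code); the example_* names and the empty callback body are placeholders.

#include <linux/msi.h>
#include <linux/platform_device.h>

/* Called by the core whenever an MSI message must be programmed. */
static void example_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	/* Write msg->address_hi/address_lo/data into device registers. */
}

static int example_setup_msis(struct platform_device *pdev)
{
	/* Ask for four MSIs; descriptors end up on dev->msi_list. */
	return platform_msi_domain_alloc_irqs(&pdev->dev, 4,
					      example_write_msi_msg);
}

static void example_teardown_msis(struct platform_device *pdev)
{
	platform_msi_domain_free_irqs(&pdev->dev);
}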
*/ -static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce) +static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)  {  	int ret; @@ -50,8 +50,6 @@ static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)  			dev_err(dev, "%s: failed to enable clk %p, error %d\n",  				__func__, ce->clk, ret);  	} - -	return ret;  }  /** diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 0ee43c1056e0..16550c63d611 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -114,8 +114,12 @@ static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)  					stop_latency_ns, "stop");  } -static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) +static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev, +			bool timed)  { +	if (!timed) +		return GENPD_DEV_CALLBACK(genpd, int, start, dev); +  	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,  					start_latency_ns, "start");  } @@ -136,41 +140,6 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)  	smp_mb__after_atomic();  } -static void genpd_acquire_lock(struct generic_pm_domain *genpd) -{ -	DEFINE_WAIT(wait); - -	mutex_lock(&genpd->lock); -	/* -	 * Wait for the domain to transition into either the active, -	 * or the power off state. -	 */ -	for (;;) { -		prepare_to_wait(&genpd->status_wait_queue, &wait, -				TASK_UNINTERRUPTIBLE); -		if (genpd->status == GPD_STATE_ACTIVE -		    || genpd->status == GPD_STATE_POWER_OFF) -			break; -		mutex_unlock(&genpd->lock); - -		schedule(); - -		mutex_lock(&genpd->lock); -	} -	finish_wait(&genpd->status_wait_queue, &wait); -} - -static void genpd_release_lock(struct generic_pm_domain *genpd) -{ -	mutex_unlock(&genpd->lock); -} - -static void genpd_set_active(struct generic_pm_domain *genpd) -{ -	if (genpd->resume_count == 0) -		genpd->status = GPD_STATE_ACTIVE; -} -  static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)  {  	s64 usecs64; @@ -244,6 +213,18 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)  }  /** + * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). + * @genpd: PM domait to power off. + * + * Queue up the execution of pm_genpd_poweroff() unless it's already been done + * before. + */ +static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) +{ +	queue_work(pm_wq, &genpd->power_off_work); +} + +/**   * __pm_genpd_poweron - Restore power to a given PM domain and its masters.   * @genpd: PM domain to power up.   * @@ -251,35 +232,14 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)   * resume a device belonging to it.   */  static int __pm_genpd_poweron(struct generic_pm_domain *genpd) -	__releases(&genpd->lock) __acquires(&genpd->lock)  {  	struct gpd_link *link; -	DEFINE_WAIT(wait);  	int ret = 0; -	/* If the domain's master is being waited for, we have to wait too. 
*/ -	for (;;) { -		prepare_to_wait(&genpd->status_wait_queue, &wait, -				TASK_UNINTERRUPTIBLE); -		if (genpd->status != GPD_STATE_WAIT_MASTER) -			break; -		mutex_unlock(&genpd->lock); - -		schedule(); - -		mutex_lock(&genpd->lock); -	} -	finish_wait(&genpd->status_wait_queue, &wait); -  	if (genpd->status == GPD_STATE_ACTIVE  	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))  		return 0; -	if (genpd->status != GPD_STATE_POWER_OFF) { -		genpd_set_active(genpd); -		return 0; -	} -  	if (genpd->cpuidle_data) {  		cpuidle_pause_and_lock();  		genpd->cpuidle_data->idle_state->disabled = true; @@ -294,20 +254,8 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)  	 */  	list_for_each_entry(link, &genpd->slave_links, slave_node) {  		genpd_sd_counter_inc(link->master); -		genpd->status = GPD_STATE_WAIT_MASTER; - -		mutex_unlock(&genpd->lock);  		ret = pm_genpd_poweron(link->master); - -		mutex_lock(&genpd->lock); - -		/* -		 * The "wait for parent" status is guaranteed not to change -		 * while the master is powering on. -		 */ -		genpd->status = GPD_STATE_POWER_OFF; -		wake_up_all(&genpd->status_wait_queue);  		if (ret) {  			genpd_sd_counter_dec(link->master);  			goto err; @@ -319,13 +267,16 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)  		goto err;   out: -	genpd_set_active(genpd); - +	genpd->status = GPD_STATE_ACTIVE;  	return 0;   err: -	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node) +	list_for_each_entry_continue_reverse(link, +					&genpd->slave_links, +					slave_node) {  		genpd_sd_counter_dec(link->master); +		genpd_queue_power_off_work(link->master); +	}  	return ret;  } @@ -356,20 +307,18 @@ int pm_genpd_name_poweron(const char *domain_name)  	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;  } -static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, -				     struct device *dev) -{ -	return GENPD_DEV_CALLBACK(genpd, int, start, dev); -} -  static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)  {  	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,  					save_state_latency_ns, "state save");  } -static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) +static int genpd_restore_dev(struct generic_pm_domain *genpd, +			struct device *dev, bool timed)  { +	if (!timed) +		return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev); +  	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,  					restore_state_latency_ns,  					"state restore"); @@ -416,133 +365,30 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,  }  /** - * __pm_genpd_save_device - Save the pre-suspend state of a device. - * @pdd: Domain data of the device to save the state of. - * @genpd: PM domain the device belongs to. - */ -static int __pm_genpd_save_device(struct pm_domain_data *pdd, -				  struct generic_pm_domain *genpd) -	__releases(&genpd->lock) __acquires(&genpd->lock) -{ -	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); -	struct device *dev = pdd->dev; -	int ret = 0; - -	if (gpd_data->need_restore > 0) -		return 0; - -	/* -	 * If the value of the need_restore flag is still unknown at this point, -	 * we trust that pm_genpd_poweroff() has verified that the device is -	 * already runtime PM suspended. 
-	 */ -	if (gpd_data->need_restore < 0) { -		gpd_data->need_restore = 1; -		return 0; -	} - -	mutex_unlock(&genpd->lock); - -	genpd_start_dev(genpd, dev); -	ret = genpd_save_dev(genpd, dev); -	genpd_stop_dev(genpd, dev); - -	mutex_lock(&genpd->lock); - -	if (!ret) -		gpd_data->need_restore = 1; - -	return ret; -} - -/** - * __pm_genpd_restore_device - Restore the pre-suspend state of a device. - * @pdd: Domain data of the device to restore the state of. - * @genpd: PM domain the device belongs to. - */ -static void __pm_genpd_restore_device(struct pm_domain_data *pdd, -				      struct generic_pm_domain *genpd) -	__releases(&genpd->lock) __acquires(&genpd->lock) -{ -	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); -	struct device *dev = pdd->dev; -	int need_restore = gpd_data->need_restore; - -	gpd_data->need_restore = 0; -	mutex_unlock(&genpd->lock); - -	genpd_start_dev(genpd, dev); - -	/* -	 * Call genpd_restore_dev() for recently added devices too (need_restore -	 * is negative then). -	 */ -	if (need_restore) -		genpd_restore_dev(genpd, dev); - -	mutex_lock(&genpd->lock); -} - -/** - * genpd_abort_poweroff - Check if a PM domain power off should be aborted. - * @genpd: PM domain to check. - * - * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during - * a "power off" operation, which means that a "power on" has occured in the - * meantime, or if its resume_count field is different from zero, which means - * that one of its devices has been resumed in the meantime. - */ -static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) -{ -	return genpd->status == GPD_STATE_WAIT_MASTER -		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; -} - -/** - * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). - * @genpd: PM domait to power off. - * - * Queue up the execution of pm_genpd_poweroff() unless it's already been done - * before. - */ -static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) -{ -	queue_work(pm_wq, &genpd->power_off_work); -} - -/**   * pm_genpd_poweroff - Remove power from a given PM domain.   * @genpd: PM domain to power down.   *   * If all of the @genpd's devices have been suspended and all of its subdomains - * have been powered down, run the runtime suspend callbacks provided by all of - * the @genpd's devices' drivers and remove power from @genpd. + * have been powered down, remove power from @genpd.   */  static int pm_genpd_poweroff(struct generic_pm_domain *genpd) -	__releases(&genpd->lock) __acquires(&genpd->lock)  {  	struct pm_domain_data *pdd;  	struct gpd_link *link; -	unsigned int not_suspended; -	int ret = 0; +	unsigned int not_suspended = 0; - start:  	/*  	 * Do not try to power off the domain in the following situations:  	 * (1) The domain is already in the "power off" state. -	 * (2) The domain is waiting for its master to power up. -	 * (3) One of the domain's devices is being resumed right now. -	 * (4) System suspend is in progress. +	 * (2) System suspend is in progress.  	 
*/  	if (genpd->status == GPD_STATE_POWER_OFF -	    || genpd->status == GPD_STATE_WAIT_MASTER -	    || genpd->resume_count > 0 || genpd->prepared_count > 0) +	    || genpd->prepared_count > 0)  		return 0;  	if (atomic_read(&genpd->sd_count) > 0)  		return -EBUSY; -	not_suspended = 0;  	list_for_each_entry(pdd, &genpd->dev_list, list_node) {  		enum pm_qos_flags_status stat; @@ -560,41 +406,11 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)  	if (not_suspended > genpd->in_progress)  		return -EBUSY; -	if (genpd->poweroff_task) { -		/* -		 * Another instance of pm_genpd_poweroff() is executing -		 * callbacks, so tell it to start over and return. -		 */ -		genpd->status = GPD_STATE_REPEAT; -		return 0; -	} -  	if (genpd->gov && genpd->gov->power_down_ok) {  		if (!genpd->gov->power_down_ok(&genpd->domain))  			return -EAGAIN;  	} -	genpd->status = GPD_STATE_BUSY; -	genpd->poweroff_task = current; - -	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { -		ret = atomic_read(&genpd->sd_count) == 0 ? -			__pm_genpd_save_device(pdd, genpd) : -EBUSY; - -		if (genpd_abort_poweroff(genpd)) -			goto out; - -		if (ret) { -			genpd_set_active(genpd); -			goto out; -		} - -		if (genpd->status == GPD_STATE_REPEAT) { -			genpd->poweroff_task = NULL; -			goto start; -		} -	} -  	if (genpd->cpuidle_data) {  		/*  		 * If cpuidle_data is set, cpuidle should turn the domain off @@ -607,14 +423,14 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)  		cpuidle_pause_and_lock();  		genpd->cpuidle_data->idle_state->disabled = false;  		cpuidle_resume_and_unlock(); -		goto out; +		return 0;  	}  	if (genpd->power_off) { -		if (atomic_read(&genpd->sd_count) > 0) { -			ret = -EBUSY; -			goto out; -		} +		int ret; + +		if (atomic_read(&genpd->sd_count) > 0) +			return -EBUSY;  		/*  		 * If sd_count > 0 at this point, one of the subdomains hasn't @@ -625,10 +441,8 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)  		 * happen very often).  		 */  		ret = genpd_power_off(genpd, true); -		if (ret == -EBUSY) { -			genpd_set_active(genpd); -			goto out; -		} +		if (ret) +			return ret;  	}  	genpd->status = GPD_STATE_POWER_OFF; @@ -638,10 +452,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)  		genpd_queue_power_off_work(link->master);  	} - out: -	genpd->poweroff_task = NULL; -	wake_up_all(&genpd->status_wait_queue); -	return ret; +	return 0;  }  /** @@ -654,9 +465,9 @@ static void genpd_power_off_work_fn(struct work_struct *work)  	genpd = container_of(work, struct generic_pm_domain, power_off_work); -	genpd_acquire_lock(genpd); +	mutex_lock(&genpd->lock);  	pm_genpd_poweroff(genpd); -	genpd_release_lock(genpd); +	mutex_unlock(&genpd->lock);  }  /** @@ -670,7 +481,6 @@ static void genpd_power_off_work_fn(struct work_struct *work)  static int pm_genpd_runtime_suspend(struct device *dev)  {  	struct generic_pm_domain *genpd; -	struct generic_pm_domain_data *gpd_data;  	bool (*stop_ok)(struct device *__dev);  	int ret; @@ -684,10 +494,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)  	if (stop_ok && !stop_ok(dev))  		return -EBUSY; -	ret = genpd_stop_dev(genpd, dev); +	ret = genpd_save_dev(genpd, dev);  	if (ret)  		return ret; +	ret = genpd_stop_dev(genpd, dev); +	if (ret) { +		genpd_restore_dev(genpd, dev, true); +		return ret; +	} +  	/*  	 * If power.irq_safe is set, this routine will be run with interrupts  	 * off, so it can't use mutexes. 
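For orientation between the domain.c hunks: the rework above drops the wait-queue/poweroff-task machinery in favour of plain mutex locking, and runtime suspend now saves device state before stopping the device. Below is a hedged sketch of a minimal genpd provider that this code would drive; every example_* name is invented.

#include <linux/pm_domain.h>

static int example_island_power_off(struct generic_pm_domain *pd)
{
	/* Gate the power island in the SoC's power controller. */
	return 0;
}

static int example_island_power_on(struct generic_pm_domain *pd)
{
	/* Ungate it again. */
	return 0;
}

static struct generic_pm_domain example_pd = {
	.name		= "example-island",
	.power_off	= example_island_power_off,
	.power_on	= example_island_power_on,
};

static void example_register_pd(void)
{
	pm_genpd_init(&example_pd, NULL, true);	/* true: starts powered off */
}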
@@ -696,16 +512,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)  		return 0;  	mutex_lock(&genpd->lock); - -	/* -	 * If we have an unknown state of the need_restore flag, it means none -	 * of the runtime PM callbacks has been invoked yet. Let's update the -	 * flag to reflect that the current state is active. -	 */ -	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); -	if (gpd_data->need_restore < 0) -		gpd_data->need_restore = 0; -  	genpd->in_progress++;  	pm_genpd_poweroff(genpd);  	genpd->in_progress--; @@ -725,8 +531,8 @@ static int pm_genpd_runtime_suspend(struct device *dev)  static int pm_genpd_runtime_resume(struct device *dev)  {  	struct generic_pm_domain *genpd; -	DEFINE_WAIT(wait);  	int ret; +	bool timed = true;  	dev_dbg(dev, "%s()\n", __func__); @@ -735,39 +541,21 @@ static int pm_genpd_runtime_resume(struct device *dev)  		return -EINVAL;  	/* If power.irq_safe, the PM domain is never powered off. */ -	if (dev->power.irq_safe) -		return genpd_start_dev_no_timing(genpd, dev); +	if (dev->power.irq_safe) { +		timed = false; +		goto out; +	}  	mutex_lock(&genpd->lock);  	ret = __pm_genpd_poweron(genpd); -	if (ret) { -		mutex_unlock(&genpd->lock); -		return ret; -	} -	genpd->status = GPD_STATE_BUSY; -	genpd->resume_count++; -	for (;;) { -		prepare_to_wait(&genpd->status_wait_queue, &wait, -				TASK_UNINTERRUPTIBLE); -		/* -		 * If current is the powering off task, we have been called -		 * reentrantly from one of the device callbacks, so we should -		 * not wait. -		 */ -		if (!genpd->poweroff_task || genpd->poweroff_task == current) -			break; -		mutex_unlock(&genpd->lock); +	mutex_unlock(&genpd->lock); -		schedule(); +	if (ret) +		return ret; -		mutex_lock(&genpd->lock); -	} -	finish_wait(&genpd->status_wait_queue, &wait); -	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); -	genpd->resume_count--; -	genpd_set_active(genpd); -	wake_up_all(&genpd->status_wait_queue); -	mutex_unlock(&genpd->lock); + out: +	genpd_start_dev(genpd, dev, timed); +	genpd_restore_dev(genpd, dev, timed);  	return 0;  } @@ -883,7 +671,7 @@ static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,  {  	struct gpd_link *link; -	if (genpd->status != GPD_STATE_POWER_OFF) +	if (genpd->status == GPD_STATE_ACTIVE)  		return;  	list_for_each_entry(link, &genpd->slave_links, slave_node) { @@ -960,14 +748,14 @@ static int pm_genpd_prepare(struct device *dev)  	if (resume_needed(dev, genpd))  		pm_runtime_resume(dev); -	genpd_acquire_lock(genpd); +	mutex_lock(&genpd->lock);  	if (genpd->prepared_count++ == 0) {  		genpd->suspended_count = 0;  		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;  	} -	genpd_release_lock(genpd); +	mutex_unlock(&genpd->lock);  	if (genpd->suspend_power_off) {  		pm_runtime_put_noidle(dev); @@ -1102,7 +890,7 @@ static int pm_genpd_resume_noirq(struct device *dev)  	pm_genpd_sync_poweron(genpd, true);  	genpd->suspended_count--; -	return genpd_start_dev(genpd, dev); +	return genpd_start_dev(genpd, dev, true);  }  /** @@ -1230,7 +1018,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)  	if (IS_ERR(genpd))  		return -EINVAL; -	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev); +	return genpd->suspend_power_off ? 
0 : genpd_start_dev(genpd, dev, true);  }  /** @@ -1324,7 +1112,7 @@ static int pm_genpd_restore_noirq(struct device *dev)  	pm_genpd_sync_poweron(genpd, true); -	return genpd_start_dev(genpd, dev); +	return genpd_start_dev(genpd, dev, true);  }  /** @@ -1440,7 +1228,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,  		gpd_data->td = *td;  	gpd_data->base.dev = dev; -	gpd_data->need_restore = -1;  	gpd_data->td.constraint_changed = true;  	gpd_data->td.effective_constraint_ns = -1;  	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; @@ -1502,7 +1289,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,  	if (IS_ERR(gpd_data))  		return PTR_ERR(gpd_data); -	genpd_acquire_lock(genpd); +	mutex_lock(&genpd->lock);  	if (genpd->prepared_count > 0) {  		ret = -EAGAIN; @@ -1519,7 +1306,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,  	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);   out: -	genpd_release_lock(genpd); +	mutex_unlock(&genpd->lock);  	if (ret)  		genpd_free_dev_data(dev, gpd_data); @@ -1563,7 +1350,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,  	gpd_data = to_gpd_data(pdd);  	dev_pm_qos_remove_notifier(dev, &gpd_data->nb); -	genpd_acquire_lock(genpd); +	mutex_lock(&genpd->lock);  	if (genpd->prepared_count > 0) {  		ret = -EAGAIN; @@ -1578,14 +1365,14 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,  	list_del_init(&pdd->list_node); -	genpd_release_lock(genpd); +	mutex_unlock(&genpd->lock);  	genpd_free_dev_data(dev, gpd_data);  	return 0;   out: -	genpd_release_lock(genpd); +	mutex_unlock(&genpd->lock);  	dev_pm_qos_add_notifier(dev, &gpd_data->nb);  	return ret; @@ -1606,17 +1393,9 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,  	    || genpd == subdomain)  		return -EINVAL; - start: -	genpd_acquire_lock(genpd); +	mutex_lock(&genpd->lock);  	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); -	if (subdomain->status != GPD_STATE_POWER_OFF -	    && subdomain->status != GPD_STATE_ACTIVE) { -		mutex_unlock(&subdomain->lock); -		genpd_release_lock(genpd); -		goto start; -	} -  	if (genpd->status == GPD_STATE_POWER_OFF  	    &&  subdomain->status != GPD_STATE_POWER_OFF) {  		ret = -EINVAL; @@ -1644,7 +1423,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,   out:  	mutex_unlock(&subdomain->lock); -	genpd_release_lock(genpd); +	mutex_unlock(&genpd->lock);  	return ret;  } @@ -1692,8 +1471,14 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,  	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))  		return -EINVAL; - start: -	genpd_acquire_lock(genpd); +	mutex_lock(&genpd->lock); + +	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) { +		pr_warn("%s: unable to remove subdomain %s\n", genpd->name, +			subdomain->name); +		ret = -EBUSY; +		goto out; +	}  	list_for_each_entry(link, &genpd->master_links, master_node) {  		if (link->slave != subdomain) @@ -1701,13 +1486,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,  		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); -		if (subdomain->status != GPD_STATE_POWER_OFF -		    && subdomain->status != GPD_STATE_ACTIVE) { -			mutex_unlock(&subdomain->lock); -			genpd_release_lock(genpd); -			goto start; -		} -  		list_del(&link->master_node);  		list_del(&link->slave_node);  		kfree(link); @@ -1720,7 +1498,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,  		break;  	} -	
genpd_release_lock(genpd); +out: +	mutex_unlock(&genpd->lock);  	return ret;  } @@ -1744,7 +1523,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)  	if (IS_ERR_OR_NULL(genpd) || state < 0)  		return -EINVAL; -	genpd_acquire_lock(genpd); +	mutex_lock(&genpd->lock);  	if (genpd->cpuidle_data) {  		ret = -EEXIST; @@ -1775,7 +1554,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)  	genpd_recalc_cpu_exit_latency(genpd);   out: -	genpd_release_lock(genpd); +	mutex_unlock(&genpd->lock);  	return ret;   err: @@ -1812,7 +1591,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)  	if (IS_ERR_OR_NULL(genpd))  		return -EINVAL; -	genpd_acquire_lock(genpd); +	mutex_lock(&genpd->lock);  	cpuidle_data = genpd->cpuidle_data;  	if (!cpuidle_data) { @@ -1830,7 +1609,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)  	kfree(cpuidle_data);   out: -	genpd_release_lock(genpd); +	mutex_unlock(&genpd->lock);  	return ret;  } @@ -1912,9 +1691,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd,  	genpd->in_progress = 0;  	atomic_set(&genpd->sd_count, 0);  	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; -	init_waitqueue_head(&genpd->status_wait_queue); -	genpd->poweroff_task = NULL; -	genpd->resume_count = 0;  	genpd->device_count = 0;  	genpd->max_off_time_ns = -1;  	genpd->max_off_time_changed = true; @@ -1952,6 +1728,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,  	list_add(&genpd->gpd_list_node, &gpd_list);  	mutex_unlock(&gpd_list_lock);  } +EXPORT_SYMBOL_GPL(pm_genpd_init);  #ifdef CONFIG_PM_GENERIC_DOMAINS_OF  /* @@ -2125,7 +1902,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);  /**   * genpd_dev_pm_detach - Detach a device from its PM domain. - * @dev: Device to attach. + * @dev: Device to detach.   * @power_off: Currently not used   *   * Try to locate a corresponding generic PM domain, which the device was @@ -2183,7 +1960,10 @@ static void genpd_dev_pm_sync(struct device *dev)   * Both generic and legacy Samsung-specific DT bindings are supported to keep   * backwards compatibility with existing DTBs.   * - * Returns 0 on successfully attached PM domain or negative error code. + * Returns 0 on successfully attached PM domain or negative error code. Note + * that if a power-domain exists for the device, but it cannot be found or + * turned on, then return -EPROBE_DEFER to ensure that the device is not + * probed and to re-try again later.   */  int genpd_dev_pm_attach(struct device *dev)  { @@ -2220,7 +2000,7 @@ int genpd_dev_pm_attach(struct device *dev)  		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",  			__func__, PTR_ERR(pd));  		of_node_put(dev->of_node); -		return PTR_ERR(pd); +		return -EPROBE_DEFER;  	}  	dev_dbg(dev, "adding to PM domain %s\n", pd->name); @@ -2238,14 +2018,15 @@ int genpd_dev_pm_attach(struct device *dev)  		dev_err(dev, "failed to add to PM domain %s: %d",  			pd->name, ret);  		of_node_put(dev->of_node); -		return ret; +		goto out;  	}  	dev->pm_domain->detach = genpd_dev_pm_detach;  	dev->pm_domain->sync = genpd_dev_pm_sync; -	pm_genpd_poweron(pd); +	ret = pm_genpd_poweron(pd); -	return 0; +out: +	return ret ? 
-EPROBE_DEFER : 0;  }  EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);  #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ @@ -2293,9 +2074,6 @@ static int pm_genpd_summary_one(struct seq_file *s,  {  	static const char * const status_lookup[] = {  		[GPD_STATE_ACTIVE] = "on", -		[GPD_STATE_WAIT_MASTER] = "wait-master", -		[GPD_STATE_BUSY] = "busy", -		[GPD_STATE_REPEAT] = "off-in-progress",  		[GPD_STATE_POWER_OFF] = "off"  	};  	struct pm_domain_data *pm_data; @@ -2309,7 +2087,7 @@ static int pm_genpd_summary_one(struct seq_file *s,  	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))  		goto exit; -	seq_printf(s, "%-30s  %-15s  ", genpd->name, status_lookup[genpd->status]); +	seq_printf(s, "%-30s  %-15s ", genpd->name, status_lookup[genpd->status]);  	/*  	 * Modifications on the list require holding locks on both @@ -2344,8 +2122,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)  	struct generic_pm_domain *genpd;  	int ret = 0; -	seq_puts(s, "    domain                      status         slaves\n"); -	seq_puts(s, "           /device                                      runtime status\n"); +	seq_puts(s, "domain                          status          slaves\n"); +	seq_puts(s, "    /device                                             runtime status\n");  	seq_puts(s, "----------------------------------------------------------------------\n");  	ret = mutex_lock_interruptible(&gpd_list_lock); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 30b7bbfdc558..1710c26ba097 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1377,7 +1377,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)  	if (dev->power.direct_complete) {  		if (pm_runtime_status_suspended(dev)) {  			pm_runtime_disable(dev); -			if (pm_runtime_suspended_if_enabled(dev)) +			if (pm_runtime_status_suspended(dev))  				goto Complete;  			pm_runtime_enable(dev); diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 677fb2843553..28cd75c535b0 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -11,6 +11,7 @@   * published by the Free Software Foundation.   */ +#include <linux/cpu.h>  #include <linux/kernel.h>  #include <linux/errno.h>  #include <linux/err.h> @@ -51,10 +52,17 @@   *		order.   * @dynamic:	not-created from static DT entries.   * @available:	true/false - marks if this OPP as available or not + * @turbo:	true if turbo (boost) OPP   * @rate:	Frequency in hertz - * @u_volt:	Nominal voltage in microvolts corresponding to this OPP + * @u_volt:	Target voltage in microvolts corresponding to this OPP + * @u_volt_min:	Minimum voltage in microvolts corresponding to this OPP + * @u_volt_max:	Maximum voltage in microvolts corresponding to this OPP + * @u_amp:	Maximum current drawn by the device in microamperes + * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's + *		frequency from any other OPP's frequency.   * @dev_opp:	points back to the device_opp struct this opp belongs to   * @rcu_head:	RCU callback head used for deferred freeing + * @np:		OPP's device node.   *   * This structure stores the OPP information for a given device.   
*/ @@ -63,11 +71,34 @@ struct dev_pm_opp {  	bool available;  	bool dynamic; +	bool turbo;  	unsigned long rate; +  	unsigned long u_volt; +	unsigned long u_volt_min; +	unsigned long u_volt_max; +	unsigned long u_amp; +	unsigned long clock_latency_ns;  	struct device_opp *dev_opp;  	struct rcu_head rcu_head; + +	struct device_node *np; +}; + +/** + * struct device_list_opp - devices managed by 'struct device_opp' + * @node:	list node + * @dev:	device to which the struct object belongs + * @rcu_head:	RCU callback head used for deferred freeing + * + * This is an internal data structure maintaining the list of devices that are + * managed by 'struct device_opp'. + */ +struct device_list_opp { +	struct list_head node; +	const struct device *dev; +	struct rcu_head rcu_head;  };  /** @@ -77,10 +108,12 @@ struct dev_pm_opp {   *		list.   *		RCU usage: nodes are not modified in the list of device_opp,   *		however addition is possible and is secured by dev_opp_list_lock - * @dev:	device pointer   * @srcu_head:	notifier head to notify the OPP availability changes.   * @rcu_head:	RCU callback head used for deferred freeing + * @dev_list:	list of devices that share these OPPs   * @opp_list:	list of opps + * @np:		struct device_node pointer for opp's DT node. + * @shared_opp: OPP is shared between multiple devices.   *   * This is an internal data structure maintaining the link to opps attached to   * a device. This structure is not meant to be shared to users as it is @@ -93,10 +126,15 @@ struct dev_pm_opp {  struct device_opp {  	struct list_head node; -	struct device *dev;  	struct srcu_notifier_head srcu_head;  	struct rcu_head rcu_head; +	struct list_head dev_list;  	struct list_head opp_list; + +	struct device_node *np; +	unsigned long clock_latency_ns_max; +	bool shared_opp; +	struct dev_pm_opp *suspend_opp;  };  /* @@ -110,12 +148,44 @@ static DEFINE_MUTEX(dev_opp_list_lock);  #define opp_rcu_lockdep_assert()					\  do {									\ -	rcu_lockdep_assert(rcu_read_lock_held() ||			\ -				lockdep_is_held(&dev_opp_list_lock),	\ +	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\ +				!lockdep_is_held(&dev_opp_list_lock),	\  			   "Missing rcu_read_lock() or "		\  			   "dev_opp_list_lock protection");		\  } while (0) +static struct device_list_opp *_find_list_dev(const struct device *dev, +					      struct device_opp *dev_opp) +{ +	struct device_list_opp *list_dev; + +	list_for_each_entry(list_dev, &dev_opp->dev_list, node) +		if (list_dev->dev == dev) +			return list_dev; + +	return NULL; +} + +static struct device_opp *_managed_opp(const struct device_node *np) +{ +	struct device_opp *dev_opp; + +	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) { +		if (dev_opp->np == np) { +			/* +			 * Multiple devices can point to the same OPP table and +			 * so will have same node-pointer, np. +			 * +			 * But the OPPs will be considered as shared only if the +			 * OPP table contains a "opp-shared" property. +			 */ +			return dev_opp->shared_opp ? 
dev_opp : NULL; +		} +	} + +	return NULL; +} +  /**   * _find_device_opp() - find device_opp struct using device pointer   * @dev:	device pointer used to lookup device OPPs @@ -132,21 +202,18 @@ do {									\   */  static struct device_opp *_find_device_opp(struct device *dev)  { -	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); +	struct device_opp *dev_opp; -	if (unlikely(IS_ERR_OR_NULL(dev))) { +	if (IS_ERR_OR_NULL(dev)) {  		pr_err("%s: Invalid parameters\n", __func__);  		return ERR_PTR(-EINVAL);  	} -	list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) { -		if (tmp_dev_opp->dev == dev) { -			dev_opp = tmp_dev_opp; -			break; -		} -	} +	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) +		if (_find_list_dev(dev, dev_opp)) +			return dev_opp; -	return dev_opp; +	return ERR_PTR(-ENODEV);  }  /** @@ -172,7 +239,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)  	opp_rcu_lockdep_assert();  	tmp_opp = rcu_dereference(opp); -	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) +	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)  		pr_err("%s: Invalid parameters\n", __func__);  	else  		v = tmp_opp->u_volt; @@ -204,7 +271,7 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)  	opp_rcu_lockdep_assert();  	tmp_opp = rcu_dereference(opp); -	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) +	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)  		pr_err("%s: Invalid parameters\n", __func__);  	else  		f = tmp_opp->rate; @@ -214,6 +281,94 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)  EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);  /** + * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not + * @opp: opp for which turbo mode is being verified + * + * Turbo OPPs are not for normal use, and can be enabled (under certain + * conditions) for short duration of times to finish high throughput work + * quickly. Running on them for longer times may overheat the chip. + * + * Return: true if opp is turbo opp, else false. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. This means that opp which could have been fetched by + * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are + * under RCU lock. The pointer returned by the opp_find_freq family must be + * used in the same section as the usage of this function with the pointer + * prior to unlocking with rcu_read_unlock() to maintain the integrity of the + * pointer. + */ +bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) +{ +	struct dev_pm_opp *tmp_opp; + +	opp_rcu_lockdep_assert(); + +	tmp_opp = rcu_dereference(opp); +	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) { +		pr_err("%s: Invalid parameters\n", __func__); +		return false; +	} + +	return tmp_opp->turbo; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); + +/** + * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds + * @dev:	device for which we do this operation + * + * Return: This function returns the max clock latency in nanoseconds. + * + * Locking: This function takes rcu_read_lock(). 
+ */ +unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) +{ +	struct device_opp *dev_opp; +	unsigned long clock_latency_ns; + +	rcu_read_lock(); + +	dev_opp = _find_device_opp(dev); +	if (IS_ERR(dev_opp)) +		clock_latency_ns = 0; +	else +		clock_latency_ns = dev_opp->clock_latency_ns_max; + +	rcu_read_unlock(); +	return clock_latency_ns; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); + +/** + * dev_pm_opp_get_suspend_opp() - Get suspend opp + * @dev:	device for which we do this operation + * + * Return: This function returns pointer to the suspend opp if it is + * defined and available, otherwise it returns NULL. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer. + */ +struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) +{ +	struct device_opp *dev_opp; + +	opp_rcu_lockdep_assert(); + +	dev_opp = _find_device_opp(dev); +	if (IS_ERR(dev_opp) || !dev_opp->suspend_opp || +	    !dev_opp->suspend_opp->available) +		return NULL; + +	return dev_opp->suspend_opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp); + +/**   * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list   * @dev:	device for which we do this operation   * @@ -407,18 +562,57 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,  }  EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); +/* List-dev Helpers */ +static void _kfree_list_dev_rcu(struct rcu_head *head) +{ +	struct device_list_opp *list_dev; + +	list_dev = container_of(head, struct device_list_opp, rcu_head); +	kfree_rcu(list_dev, rcu_head); +} + +static void _remove_list_dev(struct device_list_opp *list_dev, +			     struct device_opp *dev_opp) +{ +	list_del(&list_dev->node); +	call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head, +		  _kfree_list_dev_rcu); +} + +static struct device_list_opp *_add_list_dev(const struct device *dev, +					     struct device_opp *dev_opp) +{ +	struct device_list_opp *list_dev; + +	list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL); +	if (!list_dev) +		return NULL; + +	/* Initialize list-dev */ +	list_dev->dev = dev; +	list_add_rcu(&list_dev->node, &dev_opp->dev_list); + +	return list_dev; +} +  /** - * _add_device_opp() - Allocate a new device OPP table + * _add_device_opp() - Find device OPP table or allocate a new one   * @dev:	device for which we do this operation   * - * New device node which uses OPPs - used when multiple devices with OPP tables - * are maintained. + * It tries to find an existing table first, if it couldn't find one, it + * allocates a new OPP table and returns that.   *   * Return: valid device_opp pointer if success, else NULL.   */  static struct device_opp *_add_device_opp(struct device *dev)  {  	struct device_opp *dev_opp; +	struct device_list_opp *list_dev; + +	/* Check for existing list for 'dev' first */ +	dev_opp = _find_device_opp(dev); +	if (!IS_ERR(dev_opp)) +		return dev_opp;  	/*  	 * Allocate a new device OPP table. 
In the infrequent case where a new @@ -428,7 +622,14 @@ static struct device_opp *_add_device_opp(struct device *dev)  	if (!dev_opp)  		return NULL; -	dev_opp->dev = dev; +	INIT_LIST_HEAD(&dev_opp->dev_list); + +	list_dev = _add_list_dev(dev, dev_opp); +	if (!list_dev) { +		kfree(dev_opp); +		return NULL; +	} +  	srcu_init_notifier_head(&dev_opp->srcu_head);  	INIT_LIST_HEAD(&dev_opp->opp_list); @@ -438,6 +639,185 @@ static struct device_opp *_add_device_opp(struct device *dev)  }  /** + * _kfree_device_rcu() - Free device_opp RCU handler + * @head:	RCU head + */ +static void _kfree_device_rcu(struct rcu_head *head) +{ +	struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head); + +	kfree_rcu(device_opp, rcu_head); +} + +/** + * _remove_device_opp() - Removes a device OPP table + * @dev_opp: device OPP table to be removed. + * + * Removes/frees device OPP table it it doesn't contain any OPPs. + */ +static void _remove_device_opp(struct device_opp *dev_opp) +{ +	struct device_list_opp *list_dev; + +	if (!list_empty(&dev_opp->opp_list)) +		return; + +	list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp, +				    node); + +	_remove_list_dev(list_dev, dev_opp); + +	/* dev_list must be empty now */ +	WARN_ON(!list_empty(&dev_opp->dev_list)); + +	list_del_rcu(&dev_opp->node); +	call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head, +		  _kfree_device_rcu); +} + +/** + * _kfree_opp_rcu() - Free OPP RCU handler + * @head:	RCU head + */ +static void _kfree_opp_rcu(struct rcu_head *head) +{ +	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head); + +	kfree_rcu(opp, rcu_head); +} + +/** + * _opp_remove()  - Remove an OPP from a table definition + * @dev_opp:	points back to the device_opp struct this opp belongs to + * @opp:	pointer to the OPP to remove + * @notify:	OPP_EVENT_REMOVE notification should be sent or not + * + * This function removes an opp definition from the opp list. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * It is assumed that the caller holds required mutex for an RCU updater + * strategy. + */ +static void _opp_remove(struct device_opp *dev_opp, +			struct dev_pm_opp *opp, bool notify) +{ +	/* +	 * Notify the changes in the availability of the operable +	 * frequency/voltage list. +	 */ +	if (notify) +		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp); +	list_del_rcu(&opp->node); +	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); + +	_remove_device_opp(dev_opp); +} + +/** + * dev_pm_opp_remove()  - Remove an OPP from OPP list + * @dev:	device for which we do this operation + * @freq:	OPP to remove with matching 'freq' + * + * This function removes an opp from the opp list. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. 
+ */ +void dev_pm_opp_remove(struct device *dev, unsigned long freq) +{ +	struct dev_pm_opp *opp; +	struct device_opp *dev_opp; +	bool found = false; + +	/* Hold our list modification lock here */ +	mutex_lock(&dev_opp_list_lock); + +	dev_opp = _find_device_opp(dev); +	if (IS_ERR(dev_opp)) +		goto unlock; + +	list_for_each_entry(opp, &dev_opp->opp_list, node) { +		if (opp->rate == freq) { +			found = true; +			break; +		} +	} + +	if (!found) { +		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", +			 __func__, freq); +		goto unlock; +	} + +	_opp_remove(dev_opp, opp, true); +unlock: +	mutex_unlock(&dev_opp_list_lock); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_remove); + +static struct dev_pm_opp *_allocate_opp(struct device *dev, +					struct device_opp **dev_opp) +{ +	struct dev_pm_opp *opp; + +	/* allocate new OPP node */ +	opp = kzalloc(sizeof(*opp), GFP_KERNEL); +	if (!opp) +		return NULL; + +	INIT_LIST_HEAD(&opp->node); + +	*dev_opp = _add_device_opp(dev); +	if (!*dev_opp) { +		kfree(opp); +		return NULL; +	} + +	return opp; +} + +static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, +		    struct device_opp *dev_opp) +{ +	struct dev_pm_opp *opp; +	struct list_head *head = &dev_opp->opp_list; + +	/* +	 * Insert new OPP in order of increasing frequency and discard if +	 * already present. +	 * +	 * Need to use &dev_opp->opp_list in the condition part of the 'for' +	 * loop, don't replace it with head otherwise it will become an infinite +	 * loop. +	 */ +	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { +		if (new_opp->rate > opp->rate) { +			head = &opp->node; +			continue; +		} + +		if (new_opp->rate < opp->rate) +			break; + +		/* Duplicate OPPs */ +		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", +			 __func__, opp->rate, opp->u_volt, opp->available, +			 new_opp->rate, new_opp->u_volt, new_opp->available); + +		return opp->available && new_opp->u_volt == opp->u_volt ? +			0 : -EEXIST; +	} + +	new_opp->dev_opp = dev_opp; +	list_add_rcu(&new_opp->node, head); + +	return 0; +} + +/**   * _opp_add_dynamic() - Allocate a dynamic OPP.   
* @dev:	device for which we do this operation   * @freq:	Frequency in Hz for this OPP @@ -467,64 +847,29 @@ static struct device_opp *_add_device_opp(struct device *dev)  static int _opp_add_dynamic(struct device *dev, unsigned long freq,  			    long u_volt, bool dynamic)  { -	struct device_opp *dev_opp = NULL; -	struct dev_pm_opp *opp, *new_opp; -	struct list_head *head; +	struct device_opp *dev_opp; +	struct dev_pm_opp *new_opp;  	int ret; -	/* allocate new OPP node */ -	new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL); -	if (!new_opp) -		return -ENOMEM; -  	/* Hold our list modification lock here */  	mutex_lock(&dev_opp_list_lock); +	new_opp = _allocate_opp(dev, &dev_opp); +	if (!new_opp) { +		ret = -ENOMEM; +		goto unlock; +	} +  	/* populate the opp table */  	new_opp->rate = freq;  	new_opp->u_volt = u_volt;  	new_opp->available = true;  	new_opp->dynamic = dynamic; -	/* Check for existing list for 'dev' */ -	dev_opp = _find_device_opp(dev); -	if (IS_ERR(dev_opp)) { -		dev_opp = _add_device_opp(dev); -		if (!dev_opp) { -			ret = -ENOMEM; -			goto free_opp; -		} - -		head = &dev_opp->opp_list; -		goto list_add; -	} - -	/* -	 * Insert new OPP in order of increasing frequency -	 * and discard if already present -	 */ -	head = &dev_opp->opp_list; -	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { -		if (new_opp->rate <= opp->rate) -			break; -		else -			head = &opp->node; -	} - -	/* Duplicate OPPs ? */ -	if (new_opp->rate == opp->rate) { -		ret = opp->available && new_opp->u_volt == opp->u_volt ? -			0 : -EEXIST; - -		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", -			 __func__, opp->rate, opp->u_volt, opp->available, -			 new_opp->rate, new_opp->u_volt, new_opp->available); +	ret = _opp_add(dev, new_opp, dev_opp); +	if (ret)  		goto free_opp; -	} -list_add: -	new_opp->dev_opp = dev_opp; -	list_add_rcu(&new_opp->node, head);  	mutex_unlock(&dev_opp_list_lock);  	/* @@ -535,20 +880,52 @@ list_add:  	return 0;  free_opp: +	_opp_remove(dev_opp, new_opp, false); +unlock:  	mutex_unlock(&dev_opp_list_lock); -	kfree(new_opp);  	return ret;  } +/* TODO: Support multiple regulators */ +static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev) +{ +	u32 microvolt[3] = {0}; +	int count, ret; + +	count = of_property_count_u32_elems(opp->np, "opp-microvolt"); +	if (!count) +		return 0; + +	/* There can be one or three elements here */ +	if (count != 1 && count != 3) { +		dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", +			__func__, count); +		return -EINVAL; +	} + +	ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt, +					 count); +	if (ret) { +		dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__, +			ret); +		return -EINVAL; +	} + +	opp->u_volt = microvolt[0]; +	opp->u_volt_min = microvolt[1]; +	opp->u_volt_max = microvolt[2]; + +	return 0; +} +  /** - * dev_pm_opp_add()  - Add an OPP table from a table definitions + * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)   * @dev:	device for which we do this operation - * @freq:	Frequency in Hz for this OPP - * @u_volt:	Voltage in uVolts for this OPP + * @np:		device node   * - * This function adds an opp definition to the opp list and returns status. - * The opp is made available by default and it can be controlled using - * dev_pm_opp_enable/disable functions. + * This function adds an opp definition to the opp list and returns status. 
The + * opp can be controlled using dev_pm_opp_enable/disable functions and may be + * removed by dev_pm_opp_remove.   *   * Locking: The internal device_opp and opp structures are RCU protected.   * Hence this function internally uses RCU updater strategy with mutex locks @@ -562,108 +939,119 @@ free_opp:   * -EEXIST	Freq are same and volt are different OR   *		Duplicate OPPs (both freq and volt are same) and !opp->available   * -ENOMEM	Memory allocation failure + * -EINVAL	Failed parsing the OPP node   */ -int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) +static int _opp_add_static_v2(struct device *dev, struct device_node *np)  { -	return _opp_add_dynamic(dev, freq, u_volt, true); -} -EXPORT_SYMBOL_GPL(dev_pm_opp_add); +	struct device_opp *dev_opp; +	struct dev_pm_opp *new_opp; +	u64 rate; +	u32 val; +	int ret; -/** - * _kfree_opp_rcu() - Free OPP RCU handler - * @head:	RCU head - */ -static void _kfree_opp_rcu(struct rcu_head *head) -{ -	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head); +	/* Hold our list modification lock here */ +	mutex_lock(&dev_opp_list_lock); -	kfree_rcu(opp, rcu_head); -} +	new_opp = _allocate_opp(dev, &dev_opp); +	if (!new_opp) { +		ret = -ENOMEM; +		goto unlock; +	} -/** - * _kfree_device_rcu() - Free device_opp RCU handler - * @head:	RCU head - */ -static void _kfree_device_rcu(struct rcu_head *head) -{ -	struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head); +	ret = of_property_read_u64(np, "opp-hz", &rate); +	if (ret < 0) { +		dev_err(dev, "%s: opp-hz not found\n", __func__); +		goto free_opp; +	} -	kfree_rcu(device_opp, rcu_head); -} +	/* +	 * Rate is defined as an unsigned long in clk API, and so casting +	 * explicitly to its type. Must be fixed once rate is 64 bit +	 * guaranteed in clk API. +	 */ +	new_opp->rate = (unsigned long)rate; +	new_opp->turbo = of_property_read_bool(np, "turbo-mode"); + +	new_opp->np = np; +	new_opp->dynamic = false; +	new_opp->available = true; + +	if (!of_property_read_u32(np, "clock-latency-ns", &val)) +		new_opp->clock_latency_ns = val; + +	ret = opp_get_microvolt(new_opp, dev); +	if (ret) +		goto free_opp; + +	if (!of_property_read_u32(new_opp->np, "opp-microamp", &val)) +		new_opp->u_amp = val; + +	ret = _opp_add(dev, new_opp, dev_opp); +	if (ret) +		goto free_opp; + +	/* OPP to select on device suspend */ +	if (of_property_read_bool(np, "opp-suspend")) { +		if (dev_opp->suspend_opp) +			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n", +				 __func__, dev_opp->suspend_opp->rate, +				 new_opp->rate); +		else +			dev_opp->suspend_opp = new_opp; +	} + +	if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max) +		dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns; + +	mutex_unlock(&dev_opp_list_lock); + +	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", +		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt, +		 new_opp->u_volt_min, new_opp->u_volt_max, +		 new_opp->clock_latency_ns); -/** - * _opp_remove()  - Remove an OPP from a table definition - * @dev_opp:	points back to the device_opp struct this opp belongs to - * @opp:	pointer to the OPP to remove - * - * This function removes an opp definition from the opp list. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * It is assumed that the caller holds required mutex for an RCU updater - * strategy. 
- */
-static void _opp_remove(struct device_opp *dev_opp,
-			struct dev_pm_opp *opp)
-{
 	/*
 	 * Notify the changes in the availability of the operable
 	 * frequency/voltage list.
 	 */
-	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
-	list_del_rcu(&opp->node);
-	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+	return 0;
 
-	if (list_empty(&dev_opp->opp_list)) {
-		list_del_rcu(&dev_opp->node);
-		call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
-			  _kfree_device_rcu);
-	}
+free_opp:
+	_opp_remove(dev_opp, new_opp, false);
+unlock:
+	mutex_unlock(&dev_opp_list_lock);
+	return ret;
 }
 
 /**
- * dev_pm_opp_remove()  - Remove an OPP from OPP list
+ * dev_pm_opp_add()  - Add an OPP table from a table definition
  * @dev:	device for which we do this operation
- * @freq:	OPP to remove with matching 'freq'
+ * @freq:	Frequency in Hz for this OPP
+ * @u_volt:	Voltage in uVolts for this OPP
  *
- * This function removes an opp from the opp list.
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and it can be controlled using
+ * dev_pm_opp_enable/disable functions.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex cannot be locked.
+ *
+ * Return:
+ * 0		On success OR
+ *		Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST	Freq are same and volt are different OR
+ *		Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM	Memory allocation failure
  */
-void dev_pm_opp_remove(struct device *dev, unsigned long freq)
+int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 {
-	struct dev_pm_opp *opp;
-	struct device_opp *dev_opp;
-	bool found = false;
-
-	/* Hold our list modification lock here */
-	mutex_lock(&dev_opp_list_lock);
-
-	dev_opp = _find_device_opp(dev);
-	if (IS_ERR(dev_opp))
-		goto unlock;
-
-	list_for_each_entry(opp, &dev_opp->opp_list, node) {
-		if (opp->rate == freq) {
-			found = true;
-			break;
-		}
-	}
-
-	if (!found) {
-		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
-			 __func__, freq);
-		goto unlock;
-	}
-
-	_opp_remove(dev_opp, opp);
-unlock:
-	mutex_unlock(&dev_opp_list_lock);
+	return _opp_add_dynamic(dev, freq, u_volt, true);
 }
-EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+EXPORT_SYMBOL_GPL(dev_pm_opp_add);
 
 /**
  * _opp_set_availability() - helper to set the availability of an opp
@@ -825,28 +1213,179 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
 
 #ifdef CONFIG_OF
 /**
- * of_init_opp_table() - Initialize opp table from device tree
+ * of_free_opp_table() - Free OPP table entries created from static DT entries
  * @dev:	device pointer used to lookup device OPPs.
  *
- * Register the initial OPP table with the OPP library for given device.
+ * Free OPPs created using static entries present in DT.
  *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures.
Callers should ensure   * that this function is *NOT* called under RCU protection or in contexts where   * mutex cannot be locked. - * - * Return: - * 0		On success OR - *		Duplicate OPPs (both freq and volt are same) and opp->available - * -EEXIST	Freq are same and volt are different OR - *		Duplicate OPPs (both freq and volt are same) and !opp->available - * -ENOMEM	Memory allocation failure - * -ENODEV	when 'operating-points' property is not found or is invalid data - *		in device node. - * -ENODATA	when empty 'operating-points' property is found   */ -int of_init_opp_table(struct device *dev) +void of_free_opp_table(struct device *dev) +{ +	struct device_opp *dev_opp; +	struct dev_pm_opp *opp, *tmp; + +	/* Hold our list modification lock here */ +	mutex_lock(&dev_opp_list_lock); + +	/* Check for existing list for 'dev' */ +	dev_opp = _find_device_opp(dev); +	if (IS_ERR(dev_opp)) { +		int error = PTR_ERR(dev_opp); + +		if (error != -ENODEV) +			WARN(1, "%s: dev_opp: %d\n", +			     IS_ERR_OR_NULL(dev) ? +					"Invalid device" : dev_name(dev), +			     error); +		goto unlock; +	} + +	/* Find if dev_opp manages a single device */ +	if (list_is_singular(&dev_opp->dev_list)) { +		/* Free static OPPs */ +		list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) { +			if (!opp->dynamic) +				_opp_remove(dev_opp, opp, true); +		} +	} else { +		_remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp); +	} + +unlock: +	mutex_unlock(&dev_opp_list_lock); +} +EXPORT_SYMBOL_GPL(of_free_opp_table); + +void of_cpumask_free_opp_table(cpumask_var_t cpumask) +{ +	struct device *cpu_dev; +	int cpu; + +	WARN_ON(cpumask_empty(cpumask)); + +	for_each_cpu(cpu, cpumask) { +		cpu_dev = get_cpu_device(cpu); +		if (!cpu_dev) { +			pr_err("%s: failed to get cpu%d device\n", __func__, +			       cpu); +			continue; +		} + +		of_free_opp_table(cpu_dev); +	} +} +EXPORT_SYMBOL_GPL(of_cpumask_free_opp_table); + +/* Returns opp descriptor node from its phandle. Caller must do of_node_put() */ +static struct device_node * +_of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop) +{ +	struct device_node *opp_np; + +	opp_np = of_find_node_by_phandle(be32_to_cpup(prop->value)); +	if (!opp_np) { +		dev_err(dev, "%s: Prop: %s contains invalid opp desc phandle\n", +			__func__, prop->name); +		return ERR_PTR(-EINVAL); +	} + +	return opp_np; +} + +/* Returns opp descriptor node for a device. Caller must do of_node_put() */ +static struct device_node *_of_get_opp_desc_node(struct device *dev) +{ +	const struct property *prop; + +	prop = of_find_property(dev->of_node, "operating-points-v2", NULL); +	if (!prop) +		return ERR_PTR(-ENODEV); +	if (!prop->value) +		return ERR_PTR(-ENODATA); + +	/* +	 * TODO: Support for multiple OPP tables. +	 * +	 * There should be only ONE phandle present in "operating-points-v2" +	 * property. 
+	 */
+	if (prop->length != sizeof(__be32)) {
+		dev_err(dev, "%s: Invalid opp desc phandle\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return _of_get_opp_desc_node_from_prop(dev, prop);
+}
+
+/* Initializes OPP tables based on new bindings */
+static int _of_init_opp_table_v2(struct device *dev,
+				 const struct property *prop)
+{
+	struct device_node *opp_np, *np;
+	struct device_opp *dev_opp;
+	int ret = 0, count = 0;
+
+	if (!prop->value)
+		return -ENODATA;
+
+	/* Get opp node */
+	opp_np = _of_get_opp_desc_node_from_prop(dev, prop);
+	if (IS_ERR(opp_np))
+		return PTR_ERR(opp_np);
+
+	dev_opp = _managed_opp(opp_np);
+	if (dev_opp) {
+		/* OPPs are already managed */
+		if (!_add_list_dev(dev, dev_opp))
+			ret = -ENOMEM;
+		goto put_opp_np;
+	}
+
+	/* We have opp-list node now, iterate over it and add OPPs */
+	for_each_available_child_of_node(opp_np, np) {
+		count++;
+
+		ret = _opp_add_static_v2(dev, np);
+		if (ret) {
+			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
+				ret);
+			goto free_table;
+		}
+	}
+
+	/* There should be one or more OPPs defined */
+	if (WARN_ON(!count)) {
+		ret = -ENOENT;
+		goto put_opp_np;
+	}
+
+	dev_opp = _find_device_opp(dev);
+	if (WARN_ON(IS_ERR(dev_opp))) {
+		ret = PTR_ERR(dev_opp);
+		goto free_table;
+	}
+
+	dev_opp->np = opp_np;
+	dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+
+	of_node_put(opp_np);
+	return 0;
+
+free_table:
+	of_free_opp_table(dev);
+put_opp_np:
+	of_node_put(opp_np);
+
+	return ret;
+}
+
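For context, the v2 parser above is reached through of_init_opp_table() further below. A hedged sketch of a platform driver driving it at probe time (the foo_ prefix and the bail-out policy are illustrative, not part of this patch):

	static int foo_cpufreq_probe(struct device *cpu_dev)
	{
		int ret, count;

		/* Parses "operating-points-v2" if present, else the old binding */
		ret = of_init_opp_table(cpu_dev);
		if (ret)
			return ret;

		/* dev_pm_opp_get_opp_count() handles its own RCU locking */
		count = dev_pm_opp_get_opp_count(cpu_dev);
		if (count <= 0) {
			of_free_opp_table(cpu_dev);
			return count ? count : -ENOENT;
		}

		return 0;
	}
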
+/* Initializes OPP tables based on the old, deprecated bindings */
+static int _of_init_opp_table_v1(struct device *dev)
 {
 	const struct property *prop;
 	const __be32 *val;
@@ -881,47 +1420,177 @@ int of_init_opp_table(struct device *dev)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(of_init_opp_table);
 
 /**
- * of_free_opp_table() - Free OPP table entries created from static DT entries
+ * of_init_opp_table() - Initialize opp table from device tree
  * @dev:	device pointer used to lookup device OPPs.
  *
- * Free OPPs created using static entries present in DT.
+ * Register the initial OPP table with the OPP library for given device.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
  * Hence this function indirectly uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex cannot be locked.
+ *
+ * Return:
+ * 0		On success OR
+ *		Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST	Freq are same and volt are different OR
+ *		Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM	Memory allocation failure
+ * -ENODEV	when 'operating-points' property is not found or is invalid data
+ *		in device node.
+ * -ENODATA	when empty 'operating-points' property is found
+ * -EINVAL	when invalid entries are found in opp-v2 table
  */
-void of_free_opp_table(struct device *dev)
+int of_init_opp_table(struct device *dev)
 {
+	const struct property *prop;
+
+	/*
+	 * OPPs have two versions of bindings now. The older one is deprecated;
+	 * try for the new binding first.
+	 */
+	prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
+	if (!prop) {
+		/*
+		 * Try the old, deprecated bindings for backward compatibility
+		 * with older dtbs.
+		 */
+		return _of_init_opp_table_v1(dev);
+	}
+
+	return _of_init_opp_table_v2(dev, prop);
+}
+EXPORT_SYMBOL_GPL(of_init_opp_table);
+
+int of_cpumask_init_opp_table(cpumask_var_t cpumask)
+{
+	struct device *cpu_dev;
+	int cpu, ret = 0;
+
+	WARN_ON(cpumask_empty(cpumask));
+
+	for_each_cpu(cpu, cpumask) {
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			pr_err("%s: failed to get cpu%d device\n", __func__,
+			       cpu);
+			continue;
+		}
+
+		ret = of_init_opp_table(cpu_dev);
+		if (ret) {
+			pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
+			       __func__, cpu, ret);
+
+			/* Free all other OPPs */
+			of_cpumask_free_opp_table(cpumask);
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_cpumask_init_opp_table);
+
+/* Required only for V1 bindings, as v2 can manage it from DT itself */
+int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+	struct device_list_opp *list_dev;
 	struct device_opp *dev_opp;
-	struct dev_pm_opp *opp, *tmp;
+	struct device *dev;
+	int cpu, ret = 0;
 
-	/* Check for existing list for 'dev' */
-	dev_opp = _find_device_opp(dev);
+	rcu_read_lock();
+
+	dev_opp = _find_device_opp(cpu_dev);
 	if (IS_ERR(dev_opp)) {
-		int error = PTR_ERR(dev_opp);
-		if (error != -ENODEV)
-			WARN(1, "%s: dev_opp: %d\n",
-			     IS_ERR_OR_NULL(dev) ?
-					"Invalid device" : dev_name(dev),
-			     error);
-		return;
+		ret = -EINVAL;
+		goto out_rcu_read_unlock;
 	}
 
-	/* Hold our list modification lock here */
-	mutex_lock(&dev_opp_list_lock);
+	for_each_cpu(cpu, cpumask) {
+		if (cpu == cpu_dev->id)
+			continue;
 
-	/* Free static OPPs */
-	list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
-		if (!opp->dynamic)
-			_opp_remove(dev_opp, opp);
+		dev = get_cpu_device(cpu);
+		if (!dev) {
+			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+				__func__, cpu);
+			continue;
+		}
+
+		list_dev = _add_list_dev(dev, dev_opp);
+		if (!list_dev) {
+			dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+				__func__, cpu);
+			continue;
+		}
 	}
+out_rcu_read_unlock:
+	rcu_read_unlock();
-	mutex_unlock(&dev_opp_list_lock);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(of_free_opp_table);
+EXPORT_SYMBOL_GPL(set_cpus_sharing_opps);
+
+/*
+ * Works only for OPP v2 bindings.
+ *
+ * cpumask should already be set to the mask of cpu_dev->id.
+ * Returns -ENOENT if operating-points-v2 bindings aren't supported.
+ */
+int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+	struct device_node *np, *tmp_np;
+	struct device *tcpu_dev;
+	int cpu, ret = 0;
+
+	/* Get OPP descriptor node */
+	np = _of_get_opp_desc_node(cpu_dev);
+	if (IS_ERR(np)) {
+		dev_dbg(cpu_dev, "%s: Couldn't find opp node: %ld\n", __func__,
+			PTR_ERR(np));
+		return -ENOENT;
+	}
+
+	/* OPPs are shared ? 
*/ +	if (!of_property_read_bool(np, "opp-shared")) +		goto put_cpu_node; + +	for_each_possible_cpu(cpu) { +		if (cpu == cpu_dev->id) +			continue; + +		tcpu_dev = get_cpu_device(cpu); +		if (!tcpu_dev) { +			dev_err(cpu_dev, "%s: failed to get cpu%d device\n", +				__func__, cpu); +			ret = -ENODEV; +			goto put_cpu_node; +		} + +		/* Get OPP descriptor node */ +		tmp_np = _of_get_opp_desc_node(tcpu_dev); +		if (IS_ERR(tmp_np)) { +			dev_err(tcpu_dev, "%s: Couldn't find opp node: %ld\n", +				__func__, PTR_ERR(tmp_np)); +			ret = PTR_ERR(tmp_np); +			goto put_cpu_node; +		} + +		/* CPUs are sharing opp node */ +		if (np == tmp_np) +			cpumask_set_cpu(cpu, cpumask); + +		of_node_put(tmp_np); +	} + +put_cpu_node: +	of_node_put(np); +	return ret; +} +EXPORT_SYMBOL_GPL(of_get_cpus_sharing_opps);  #endif diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index f1a5d95e7b20..998fa6b23084 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -73,6 +73,8 @@ extern int pm_qos_sysfs_add_resume_latency(struct device *dev);  extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);  extern int pm_qos_sysfs_add_flags(struct device *dev);  extern void pm_qos_sysfs_remove_flags(struct device *dev); +extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev); +extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev);  #else /* CONFIG_PM */ diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index e56d538d039e..7f3646e459cb 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -883,3 +883,40 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)  	mutex_unlock(&dev_pm_qos_mtx);  	return ret;  } + +/** + * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace + * @dev: Device whose latency tolerance to expose + */ +int dev_pm_qos_expose_latency_tolerance(struct device *dev) +{ +	int ret; + +	if (!dev->power.set_latency_tolerance) +		return -EINVAL; + +	mutex_lock(&dev_pm_qos_sysfs_mtx); +	ret = pm_qos_sysfs_add_latency_tolerance(dev); +	mutex_unlock(&dev_pm_qos_sysfs_mtx); + +	return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance); + +/** + * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace + * @dev: Device whose latency tolerance to hide + */ +void dev_pm_qos_hide_latency_tolerance(struct device *dev) +{ +	mutex_lock(&dev_pm_qos_sysfs_mtx); +	pm_qos_sysfs_remove_latency_tolerance(dev); +	mutex_unlock(&dev_pm_qos_sysfs_mtx); + +	/* Remove the request from user space now */ +	pm_runtime_get_sync(dev); +	dev_pm_qos_update_user_latency_tolerance(dev, +		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT); +	pm_runtime_put(dev); +} +EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance); diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index d2be3f9c211c..a7b46798c81d 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -738,6 +738,17 @@ void pm_qos_sysfs_remove_flags(struct device *dev)  	sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);  } +int pm_qos_sysfs_add_latency_tolerance(struct device *dev) +{ +	return sysfs_merge_group(&dev->kobj, +				 &pm_qos_latency_tolerance_attr_group); +} + +void pm_qos_sysfs_remove_latency_tolerance(struct device *dev) +{ +	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); +} +  void rpm_sysfs_remove(struct device *dev)  {  	sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); diff --git a/drivers/base/property.c 
b/drivers/base/property.c index f3f6d167f3f1..2d75366c61e0 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -16,6 +16,8 @@  #include <linux/of.h>  #include <linux/of_address.h>  #include <linux/property.h> +#include <linux/etherdevice.h> +#include <linux/phy.h>  /**   * device_add_property_set - Add a collection of properties to a device object. @@ -27,9 +29,10 @@   */  void device_add_property_set(struct device *dev, struct property_set *pset)  { -	if (pset) -		pset->fwnode.type = FWNODE_PDATA; +	if (!pset) +		return; +	pset->fwnode.type = FWNODE_PDATA;  	set_secondary_fwnode(dev, &pset->fwnode);  }  EXPORT_SYMBOL_GPL(device_add_property_set); @@ -153,6 +156,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_present);   *	   %-ENODATA if the property does not have a value,   *	   %-EPROTO if the property is not an array of numbers,   *	   %-EOVERFLOW if the size of the property is not as expected. + *	   %-ENXIO if no suitable firmware interface is present.   */  int device_property_read_u8_array(struct device *dev, const char *propname,  				  u8 *val, size_t nval) @@ -177,6 +181,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u8_array);   *	   %-ENODATA if the property does not have a value,   *	   %-EPROTO if the property is not an array of numbers,   *	   %-EOVERFLOW if the size of the property is not as expected. + *	   %-ENXIO if no suitable firmware interface is present.   */  int device_property_read_u16_array(struct device *dev, const char *propname,  				   u16 *val, size_t nval) @@ -201,6 +206,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u16_array);   *	   %-ENODATA if the property does not have a value,   *	   %-EPROTO if the property is not an array of numbers,   *	   %-EOVERFLOW if the size of the property is not as expected. + *	   %-ENXIO if no suitable firmware interface is present.   */  int device_property_read_u32_array(struct device *dev, const char *propname,  				   u32 *val, size_t nval) @@ -225,6 +231,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u32_array);   *	   %-ENODATA if the property does not have a value,   *	   %-EPROTO if the property is not an array of numbers,   *	   %-EOVERFLOW if the size of the property is not as expected. + *	   %-ENXIO if no suitable firmware interface is present.   */  int device_property_read_u64_array(struct device *dev, const char *propname,  				   u64 *val, size_t nval) @@ -249,6 +256,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u64_array);   *	   %-ENODATA if the property does not have a value,   *	   %-EPROTO or %-EILSEQ if the property is not an array of strings,   *	   %-EOVERFLOW if the size of the property is not as expected. + *	   %-ENXIO if no suitable firmware interface is present.   */  int device_property_read_string_array(struct device *dev, const char *propname,  				      const char **val, size_t nval) @@ -270,6 +278,7 @@ EXPORT_SYMBOL_GPL(device_property_read_string_array);   *	   %-EINVAL if given arguments are not valid,   *	   %-ENODATA if the property does not have a value,   *	   %-EPROTO or %-EILSEQ if the property type is not a string. + *	   %-ENXIO if no suitable firmware interface is present.   
*/
 int device_property_read_string(struct device *dev, const char *propname,
 				const char **val)
@@ -291,9 +300,11 @@ EXPORT_SYMBOL_GPL(device_property_read_string);
 	else if (is_acpi_node(_fwnode_)) \
 		_ret_ = acpi_dev_prop_read(to_acpi_node(_fwnode_), _propname_, \
 					   _proptype_, _val_, _nval_); \
-	else \
+	else if (is_pset(_fwnode_)) \
 		_ret_ = pset_prop_read_array(to_pset(_fwnode_), _propname_, \
 					     _proptype_, _val_, _nval_); \
+	else \
+		_ret_ = -ENXIO; \
 	_ret_; \
 })
 
@@ -431,9 +442,10 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
 	else if (is_acpi_node(fwnode))
 		return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
 					  DEV_PROP_STRING, val, nval);
-
-	return pset_prop_read_array(to_pset(fwnode), propname,
-				    DEV_PROP_STRING, val, nval);
+	else if (is_pset(fwnode))
+		return pset_prop_read_array(to_pset(fwnode), propname,
+					    DEV_PROP_STRING, val, nval);
+	return -ENXIO;
 }
 EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
 
@@ -461,7 +473,8 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
 		return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
 					  DEV_PROP_STRING, val, 1);
 
-	return -ENXIO;
+	return pset_prop_read_array(to_pset(fwnode), propname,
+				    DEV_PROP_STRING, val, 1);
 }
 EXPORT_SYMBOL_GPL(fwnode_property_read_string);
 
@@ -533,3 +546,81 @@ bool device_dma_is_coherent(struct device *dev)
 	return coherent;
 }
 EXPORT_SYMBOL_GPL(device_dma_is_coherent);
+
+/**
+ * device_get_phy_mode - Get phy mode for given device
+ * @dev:	Pointer to the given device
+ *
+ * The function gets the phy interface string from the 'phy-mode' or
+ * 'phy-connection-type' property, and returns its index in the phy_modes
+ * table, or errno in error case.
+ */
+int device_get_phy_mode(struct device *dev)
+{
+	const char *pm;
+	int err, i;
+
+	err = device_property_read_string(dev, "phy-mode", &pm);
+	if (err < 0)
+		err = device_property_read_string(dev,
+						  "phy-connection-type", &pm);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
+		if (!strcasecmp(pm, phy_modes(i)))
+			return i;
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(device_get_phy_mode);
+
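As a usage note, a network driver probe might consume this helper as follows; a sketch under the assumption of a DT- or ACPI-described MAC (the foo_ prefix is illustrative, not part of this patch):

	static int foo_mac_probe(struct device *dev)
	{
		int phy_mode;

		/* Works for both DT and ACPI via the unified property API */
		phy_mode = device_get_phy_mode(dev);
		if (phy_mode < 0)
			return phy_mode;

		if (phy_mode == PHY_INTERFACE_MODE_RGMII)
			dev_info(dev, "using RGMII\n");

		return 0;
	}
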
+static void *device_get_mac_addr(struct device *dev,
+				 const char *name, char *addr,
+				 int alen)
+{
+	int ret = device_property_read_u8_array(dev, name, addr, alen);
+
+	if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
+		return addr;
+	return NULL;
+}
+
+/**
+ * device_get_mac_address - Get the MAC for a given device
+ * @dev:	Pointer to the device
+ * @addr:	Address of buffer to store the MAC in
+ * @alen:	Length of the buffer pointed to by addr, should be ETH_ALEN
+ *
+ * Search the firmware node for the best MAC address to use.  'mac-address' is
+ * checked first, because that is supposed to contain the "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address.  If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the firmware tables, but were not updated by the firmware.  For
+ * example, the DTS could define 'mac-address' and 'local-mac-address', with
+ * zero MAC addresses.  Some older U-Boots only initialized 'local-mac-address'.
+ * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
+ * exists but is all zeros.
+*/
+void *device_get_mac_address(struct device *dev, char *addr, int alen)
+{
+	char *res;
+
+	res = device_get_mac_addr(dev, "mac-address", addr, alen);
+	if (res)
+		return res;
+
+	res = device_get_mac_addr(dev, "local-mac-address", addr, alen);
+	if (res)
+		return res;
+
+	return device_get_mac_addr(dev, "address", addr, alen);
+}
+EXPORT_SYMBOL(device_get_mac_address);
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b2b2849fc6d3..cc557886ab23 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -136,14 +136,20 @@ struct regmap {
 	/* if set, the HW registers are known to match map->reg_defaults */
 	bool no_sync_defaults;
 
-	struct reg_default *patch;
+	struct reg_sequence *patch;
 	int patch_regs;
 
-	/* if set, converts bulk rw to single rw */
-	bool use_single_rw;
+	/* if set, converts bulk read to single read */
+	bool use_single_read;
+	/* if set, converts bulk write to single write */
+	bool use_single_write;
 	/* if set, the device supports multi write mode */
 	bool can_multi_write;
 
+	/* if set, raw reads/writes are limited to this size */
+	size_t max_raw_read;
+	size_t max_raw_write;
+
 	struct rb_root range_tree;
 	void *selector_work_buf;	/* Scratch buffer used for selector */
 };
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 81751a49d8bf..56486d92c4e7 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
 	if (!blk)
 		return -ENOMEM;
 
-	present = krealloc(rbnode->cache_present,
-		    BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
-	if (!present) {
-		kfree(blk);
-		return -ENOMEM;
+	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+		present = krealloc(rbnode->cache_present,
+				   BITS_TO_LONGS(blklen) * sizeof(*present),
+				   GFP_KERNEL);
+		if (!present) {
+			kfree(blk);
+			return -ENOMEM;
+		}
+
+		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+		       * sizeof(*present));
+	} else {
+		present = rbnode->cache_present;
 	}
 
 	/* insert the register value in the correct place in the rbnode block */
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index b9862d741a56..6f8a13ec32a4 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -729,7 +729,7 @@ int regcache_sync_block(struct regmap *map, void *block,
 			unsigned int block_base, unsigned int start,
 			unsigned int end)
 {
-	if (regmap_can_raw_write(map) && !map->use_single_rw)
+	if (regmap_can_raw_write(map) && !map->use_single_write)
 		return regcache_sync_block_raw(map, block, cache_present,
 					       block_base, start, end);
 	else
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
index 8d304e2a943d..c03ebfd4c731 100644
--- a/drivers/base/regmap/regmap-ac97.c
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -78,37 +78,24 @@ static const struct regmap_bus ac97_regmap_bus = {
 	.reg_read = regmap_ac97_reg_read,
 };
 
-/**
- * regmap_init_ac97(): Initialise AC'97 register map
- *
- * @ac97: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid
pointer to - * a struct regmap. - */ -struct regmap *regmap_init_ac97(struct snd_ac97 *ac97, -				const struct regmap_config *config) +struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97, +				  const struct regmap_config *config, +				  struct lock_class_key *lock_key, +				  const char *lock_name)  { -	return regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config); +	return __regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config, +			     lock_key, lock_name);  } -EXPORT_SYMBOL_GPL(regmap_init_ac97); +EXPORT_SYMBOL_GPL(__regmap_init_ac97); -/** - * devm_regmap_init_ac97(): Initialise AC'97 register map - * - * @ac97: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap.  The regmap will be automatically freed by the - * device management code. - */ -struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97, -				     const struct regmap_config *config) +struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97, +				       const struct regmap_config *config, +				       struct lock_class_key *lock_key, +				       const char *lock_name)  { -	return devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config); +	return __devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config, +				  lock_key, lock_name);  } -EXPORT_SYMBOL_GPL(devm_regmap_init_ac97); +EXPORT_SYMBOL_GPL(__devm_regmap_init_ac97);  MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 5799a0b9e6cc..f42f2bac6466 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -469,6 +469,87 @@ static const struct file_operations regmap_access_fops = {  	.llseek = default_llseek,  }; +static ssize_t regmap_cache_only_write_file(struct file *file, +					    const char __user *user_buf, +					    size_t count, loff_t *ppos) +{ +	struct regmap *map = container_of(file->private_data, +					  struct regmap, cache_only); +	ssize_t result; +	bool was_enabled, require_sync = false; +	int err; + +	map->lock(map->lock_arg); + +	was_enabled = map->cache_only; + +	result = debugfs_write_file_bool(file, user_buf, count, ppos); +	if (result < 0) { +		map->unlock(map->lock_arg); +		return result; +	} + +	if (map->cache_only && !was_enabled) { +		dev_warn(map->dev, "debugfs cache_only=Y forced\n"); +		add_taint(TAINT_USER, LOCKDEP_STILL_OK); +	} else if (!map->cache_only && was_enabled) { +		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n"); +		require_sync = true; +	} + +	map->unlock(map->lock_arg); + +	if (require_sync) { +		err = regcache_sync(map); +		if (err) +			dev_err(map->dev, "Failed to sync cache %d\n", err); +	} + +	return result; +} + +static const struct file_operations regmap_cache_only_fops = { +	.open = simple_open, +	.read = debugfs_read_file_bool, +	.write = regmap_cache_only_write_file, +}; + +static ssize_t regmap_cache_bypass_write_file(struct file *file, +					      const char __user *user_buf, +					      size_t count, loff_t *ppos) +{ +	struct regmap *map = container_of(file->private_data, +					  struct regmap, cache_bypass); +	ssize_t result; +	bool was_enabled; + +	map->lock(map->lock_arg); + +	was_enabled = map->cache_bypass; + +	result = debugfs_write_file_bool(file, user_buf, count, ppos); +	if (result < 0) +		goto out; + +	if (map->cache_bypass && !was_enabled) { +		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n"); +		add_taint(TAINT_USER, LOCKDEP_STILL_OK); 
+	} else if (!map->cache_bypass && was_enabled) { +		dev_warn(map->dev, "debugfs cache_bypass=N forced\n"); +	} + +out: +	map->unlock(map->lock_arg); + +	return result; +} + +static const struct file_operations regmap_cache_bypass_fops = { +	.open = simple_open, +	.read = debugfs_read_file_bool, +	.write = regmap_cache_bypass_write_file, +}; +  void regmap_debugfs_init(struct regmap *map, const char *name)  {  	struct rb_node *next; @@ -518,10 +599,11 @@ void regmap_debugfs_init(struct regmap *map, const char *name)  	if (map->max_register || regmap_readable(map, 0)) {  		umode_t registers_mode; -		if (IS_ENABLED(REGMAP_ALLOW_WRITE_DEBUGFS)) -			registers_mode = 0600; -		else -			registers_mode = 0400; +#if defined(REGMAP_ALLOW_WRITE_DEBUGFS) +		registers_mode = 0600; +#else +		registers_mode = 0400; +#endif  		debugfs_create_file("registers", registers_mode, map->debugfs,  				    map, ®map_map_fops); @@ -530,12 +612,13 @@ void regmap_debugfs_init(struct regmap *map, const char *name)  	}  	if (map->cache_type) { -		debugfs_create_bool("cache_only", 0400, map->debugfs, -				    &map->cache_only); +		debugfs_create_file("cache_only", 0600, map->debugfs, +				    &map->cache_only, ®map_cache_only_fops);  		debugfs_create_bool("cache_dirty", 0400, map->debugfs,  				    &map->cache_dirty); -		debugfs_create_bool("cache_bypass", 0400, map->debugfs, -				    &map->cache_bypass); +		debugfs_create_file("cache_bypass", 0600, map->debugfs, +				    &map->cache_bypass, +				    ®map_cache_bypass_fops);  	}  	next = rb_first(&map->range_tree); diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index 4b76e33110a2..1a8ec3b2b601 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c @@ -209,11 +209,60 @@ static struct regmap_bus regmap_i2c = {  	.val_format_endian_default = REGMAP_ENDIAN_BIG,  }; +static int regmap_i2c_smbus_i2c_write(void *context, const void *data, +				      size_t count) +{ +	struct device *dev = context; +	struct i2c_client *i2c = to_i2c_client(dev); + +	if (count < 1) +		return -EINVAL; +	if (count >= I2C_SMBUS_BLOCK_MAX) +		return -E2BIG; + +	--count; +	return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count, +					      ((u8 *)data + 1)); +} + +static int regmap_i2c_smbus_i2c_read(void *context, const void *reg, +				     size_t reg_size, void *val, +				     size_t val_size) +{ +	struct device *dev = context; +	struct i2c_client *i2c = to_i2c_client(dev); +	int ret; + +	if (reg_size != 1 || val_size < 1) +		return -EINVAL; +	if (val_size >= I2C_SMBUS_BLOCK_MAX) +		return -E2BIG; + +	ret = i2c_smbus_read_i2c_block_data(i2c, ((u8 *)reg)[0], val_size, val); +	if (ret == val_size) +		return 0; +	else if (ret < 0) +		return ret; +	else +		return -EIO; +} + +static struct regmap_bus regmap_i2c_smbus_i2c_block = { +	.write = regmap_i2c_smbus_i2c_write, +	.read = regmap_i2c_smbus_i2c_read, +	.max_raw_read = I2C_SMBUS_BLOCK_MAX, +	.max_raw_write = I2C_SMBUS_BLOCK_MAX, +}; +  static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,  					const struct regmap_config *config)  {  	if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C))  		return ®map_i2c; +	else if (config->reg_bits == 8 && +		 i2c_check_functionality(i2c->adapter, +					 I2C_FUNC_SMBUS_I2C_BLOCK)) +		return ®map_i2c_smbus_i2c_block;  	else if (config->val_bits == 16 && config->reg_bits == 8 &&  		 i2c_check_functionality(i2c->adapter,  					 I2C_FUNC_SMBUS_WORD_DATA)) @@ -233,47 +282,34 @@ static const struct regmap_bus 
*regmap_get_i2c_bus(struct i2c_client *i2c,  	return ERR_PTR(-ENOTSUPP);  } -/** - * regmap_init_i2c(): Initialise register map - * - * @i2c: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer to - * a struct regmap. - */ -struct regmap *regmap_init_i2c(struct i2c_client *i2c, -			       const struct regmap_config *config) +struct regmap *__regmap_init_i2c(struct i2c_client *i2c, +				 const struct regmap_config *config, +				 struct lock_class_key *lock_key, +				 const char *lock_name)  {  	const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);  	if (IS_ERR(bus))  		return ERR_CAST(bus); -	return regmap_init(&i2c->dev, bus, &i2c->dev, config); +	return __regmap_init(&i2c->dev, bus, &i2c->dev, config, +			     lock_key, lock_name);  } -EXPORT_SYMBOL_GPL(regmap_init_i2c); +EXPORT_SYMBOL_GPL(__regmap_init_i2c); -/** - * devm_regmap_init_i2c(): Initialise managed register map - * - * @i2c: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap.  The regmap will be automatically freed by the - * device management code. - */ -struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c, -				    const struct regmap_config *config) +struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c, +				      const struct regmap_config *config, +				      struct lock_class_key *lock_key, +				      const char *lock_name)  {  	const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);  	if (IS_ERR(bus))  		return ERR_CAST(bus); -	return devm_regmap_init(&i2c->dev, bus, &i2c->dev, config); +	return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config, +				  lock_key, lock_name);  } -EXPORT_SYMBOL_GPL(devm_regmap_init_i2c); +EXPORT_SYMBOL_GPL(__devm_regmap_init_i2c);  MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 2597600a5d26..38d1f72d869c 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -209,7 +209,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)  	 * Read in the statuses, using a single bulk read if possible  	 * in order to reduce the I/O overheads.  	 */ -	if (!map->use_single_rw && map->reg_stride == 1 && +	if (!map->use_single_read && map->reg_stride == 1 &&  	    data->irq_reg_stride == 1) {  		u8 *buf8 = data->status_reg_buf;  		u16 *buf16 = data->status_reg_buf; @@ -398,7 +398,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,  	else  		d->irq_reg_stride = 1; -	if (!map->use_single_rw && map->reg_stride == 1 && +	if (!map->use_single_read && map->reg_stride == 1 &&  	    d->irq_reg_stride == 1) {  		d->status_reg_buf = kmalloc(map->format.val_bytes *  					    chip->num_regs, GFP_KERNEL); diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c index 04a329a377e9..426a57e41ac7 100644 --- a/drivers/base/regmap/regmap-mmio.c +++ b/drivers/base/regmap/regmap-mmio.c @@ -296,20 +296,11 @@ err_free:  	return ERR_PTR(ret);  } -/** - * regmap_init_mmio_clk(): Initialise register map with register clock - * - * @dev: Device that will be interacted with - * @clk_id: register clock consumer ID - * @regs: Pointer to memory-mapped IO region - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer to - * a struct regmap. 
- */ -struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id, -				    void __iomem *regs, -				    const struct regmap_config *config) +struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id, +				      void __iomem *regs, +				      const struct regmap_config *config, +				      struct lock_class_key *lock_key, +				      const char *lock_name)  {  	struct regmap_mmio_context *ctx; @@ -317,25 +308,17 @@ struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,  	if (IS_ERR(ctx))  		return ERR_CAST(ctx); -	return regmap_init(dev, ®map_mmio, ctx, config); +	return __regmap_init(dev, ®map_mmio, ctx, config, +			     lock_key, lock_name);  } -EXPORT_SYMBOL_GPL(regmap_init_mmio_clk); - -/** - * devm_regmap_init_mmio_clk(): Initialise managed register map with clock - * - * @dev: Device that will be interacted with - * @clk_id: register clock consumer ID - * @regs: Pointer to memory-mapped IO region - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap.  The regmap will be automatically freed by the - * device management code. - */ -struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id, -					 void __iomem *regs, -					 const struct regmap_config *config) +EXPORT_SYMBOL_GPL(__regmap_init_mmio_clk); + +struct regmap *__devm_regmap_init_mmio_clk(struct device *dev, +					   const char *clk_id, +					   void __iomem *regs, +					   const struct regmap_config *config, +					   struct lock_class_key *lock_key, +					   const char *lock_name)  {  	struct regmap_mmio_context *ctx; @@ -343,8 +326,9 @@ struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,  	if (IS_ERR(ctx))  		return ERR_CAST(ctx); -	return devm_regmap_init(dev, ®map_mmio, ctx, config); +	return __devm_regmap_init(dev, ®map_mmio, ctx, config, +				  lock_key, lock_name);  } -EXPORT_SYMBOL_GPL(devm_regmap_init_mmio_clk); +EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk);  MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c index 53d1148e80a0..edd9a839d004 100644 --- a/drivers/base/regmap/regmap-spi.c +++ b/drivers/base/regmap/regmap-spi.c @@ -113,37 +113,24 @@ static struct regmap_bus regmap_spi = {  	.val_format_endian_default = REGMAP_ENDIAN_BIG,  }; -/** - * regmap_init_spi(): Initialise register map - * - * @spi: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer to - * a struct regmap. - */ -struct regmap *regmap_init_spi(struct spi_device *spi, -			       const struct regmap_config *config) +struct regmap *__regmap_init_spi(struct spi_device *spi, +				 const struct regmap_config *config, +				 struct lock_class_key *lock_key, +				 const char *lock_name)  { -	return regmap_init(&spi->dev, ®map_spi, &spi->dev, config); +	return __regmap_init(&spi->dev, ®map_spi, &spi->dev, config, +			     lock_key, lock_name);  } -EXPORT_SYMBOL_GPL(regmap_init_spi); +EXPORT_SYMBOL_GPL(__regmap_init_spi); -/** - * devm_regmap_init_spi(): Initialise register map - * - * @spi: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap.  The map will be automatically freed by the - * device management code. 
- */ -struct regmap *devm_regmap_init_spi(struct spi_device *spi, -				    const struct regmap_config *config) +struct regmap *__devm_regmap_init_spi(struct spi_device *spi, +				      const struct regmap_config *config, +				      struct lock_class_key *lock_key, +				      const char *lock_name)  { -	return devm_regmap_init(&spi->dev, ®map_spi, &spi->dev, config); +	return __devm_regmap_init(&spi->dev, ®map_spi, &spi->dev, config, +				  lock_key, lock_name);  } -EXPORT_SYMBOL_GPL(devm_regmap_init_spi); +EXPORT_SYMBOL_GPL(__devm_regmap_init_spi);  MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c index d7026dc33388..7e58f6560399 100644 --- a/drivers/base/regmap/regmap-spmi.c +++ b/drivers/base/regmap/regmap-spmi.c @@ -91,36 +91,25 @@ static struct regmap_bus regmap_spmi_base = {  	.val_format_endian_default	= REGMAP_ENDIAN_NATIVE,  }; -/** - * regmap_init_spmi_base(): Create regmap for the Base register space - * @sdev:	SPMI device that will be interacted with - * @config:	Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer to - * a struct regmap. - */ -struct regmap *regmap_init_spmi_base(struct spmi_device *sdev, -				     const struct regmap_config *config) +struct regmap *__regmap_init_spmi_base(struct spmi_device *sdev, +				       const struct regmap_config *config, +				       struct lock_class_key *lock_key, +				       const char *lock_name)  { -	return regmap_init(&sdev->dev, ®map_spmi_base, sdev, config); +	return __regmap_init(&sdev->dev, ®map_spmi_base, sdev, config, +			     lock_key, lock_name);  } -EXPORT_SYMBOL_GPL(regmap_init_spmi_base); +EXPORT_SYMBOL_GPL(__regmap_init_spmi_base); -/** - * devm_regmap_init_spmi_base(): Create managed regmap for Base register space - * @sdev:	SPMI device that will be interacted with - * @config:	Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap.  The regmap will be automatically freed by the - * device management code. - */ -struct regmap *devm_regmap_init_spmi_base(struct spmi_device *sdev, -					  const struct regmap_config *config) +struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *sdev, +					    const struct regmap_config *config, +					    struct lock_class_key *lock_key, +					    const char *lock_name)  { -	return devm_regmap_init(&sdev->dev, ®map_spmi_base, sdev, config); +	return __devm_regmap_init(&sdev->dev, ®map_spmi_base, sdev, config, +				  lock_key, lock_name);  } -EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_base); +EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_base);  static int regmap_spmi_ext_read(void *context,  				const void *reg, size_t reg_size, @@ -222,35 +211,24 @@ static struct regmap_bus regmap_spmi_ext = {  	.val_format_endian_default	= REGMAP_ENDIAN_NATIVE,  }; -/** - * regmap_init_spmi_ext(): Create regmap for Ext register space - * @sdev:	Device that will be interacted with - * @config:	Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer to - * a struct regmap. 
- */ -struct regmap *regmap_init_spmi_ext(struct spmi_device *sdev, -				    const struct regmap_config *config) +struct regmap *__regmap_init_spmi_ext(struct spmi_device *sdev, +				      const struct regmap_config *config, +				      struct lock_class_key *lock_key, +				      const char *lock_name)  { -	return regmap_init(&sdev->dev, ®map_spmi_ext, sdev, config); +	return __regmap_init(&sdev->dev, ®map_spmi_ext, sdev, config, +			     lock_key, lock_name);  } -EXPORT_SYMBOL_GPL(regmap_init_spmi_ext); +EXPORT_SYMBOL_GPL(__regmap_init_spmi_ext); -/** - * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space - * @sdev:	SPMI device that will be interacted with - * @config:	Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap.  The regmap will be automatically freed by the - * device management code. - */ -struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *sdev, -				     const struct regmap_config *config) +struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *sdev, +					   const struct regmap_config *config, +					   struct lock_class_key *lock_key, +					   const char *lock_name)  { -	return devm_regmap_init(&sdev->dev, ®map_spmi_ext, sdev, config); +	return __devm_regmap_init(&sdev->dev, ®map_spmi_ext, sdev, config, +				  lock_key, lock_name);  } -EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_ext); +EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_ext);  MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 7111d04f2621..afaf56200674 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -18,6 +18,7 @@  #include <linux/of.h>  #include <linux/rbtree.h>  #include <linux/sched.h> +#include <linux/delay.h>  #define CREATE_TRACE_POINTS  #include "trace.h" @@ -34,7 +35,7 @@  static int _regmap_update_bits(struct regmap *map, unsigned int reg,  			       unsigned int mask, unsigned int val, -			       bool *change); +			       bool *change, bool force_write);  static int _regmap_bus_reg_read(void *context, unsigned int reg,  				unsigned int *val); @@ -93,6 +94,9 @@ bool regmap_writeable(struct regmap *map, unsigned int reg)  bool regmap_readable(struct regmap *map, unsigned int reg)  { +	if (!map->reg_read) +		return false; +  	if (map->max_register && reg > map->max_register)  		return false; @@ -515,22 +519,12 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,  }  EXPORT_SYMBOL_GPL(regmap_get_val_endian); -/** - * regmap_init(): Initialise register map - * - * @dev: Device that will be interacted with - * @bus: Bus-specific callbacks to use with device - * @bus_context: Data passed to bus-specific callbacks - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer to - * a struct regmap.  This function should generally not be called - * directly, it should be called by bus-specific init functions. 
- */ -struct regmap *regmap_init(struct device *dev, -			   const struct regmap_bus *bus, -			   void *bus_context, -			   const struct regmap_config *config) +struct regmap *__regmap_init(struct device *dev, +			     const struct regmap_bus *bus, +			     void *bus_context, +			     const struct regmap_config *config, +			     struct lock_class_key *lock_key, +			     const char *lock_name)  {  	struct regmap *map;  	int ret = -EINVAL; @@ -556,10 +550,14 @@ struct regmap *regmap_init(struct device *dev,  			spin_lock_init(&map->spinlock);  			map->lock = regmap_lock_spinlock;  			map->unlock = regmap_unlock_spinlock; +			lockdep_set_class_and_name(&map->spinlock, +						   lock_key, lock_name);  		} else {  			mutex_init(&map->mutex);  			map->lock = regmap_lock_mutex;  			map->unlock = regmap_unlock_mutex; +			lockdep_set_class_and_name(&map->mutex, +						   lock_key, lock_name);  		}  		map->lock_arg = map;  	} @@ -573,8 +571,13 @@ struct regmap *regmap_init(struct device *dev,  		map->reg_stride = config->reg_stride;  	else  		map->reg_stride = 1; -	map->use_single_rw = config->use_single_rw; -	map->can_multi_write = config->can_multi_write; +	map->use_single_read = config->use_single_rw || !bus || !bus->read; +	map->use_single_write = config->use_single_rw || !bus || !bus->write; +	map->can_multi_write = config->can_multi_write && bus && bus->write; +	if (bus) { +		map->max_raw_read = bus->max_raw_read; +		map->max_raw_write = bus->max_raw_write; +	}  	map->dev = dev;  	map->bus = bus;  	map->bus_context = bus_context; @@ -763,7 +766,7 @@ struct regmap *regmap_init(struct device *dev,  		if ((reg_endian != REGMAP_ENDIAN_BIG) ||  		    (val_endian != REGMAP_ENDIAN_BIG))  			goto err_map; -		map->use_single_rw = true; +		map->use_single_write = true;  	}  	if (!map->format.format_write && @@ -899,30 +902,19 @@ err_map:  err:  	return ERR_PTR(ret);  } -EXPORT_SYMBOL_GPL(regmap_init); +EXPORT_SYMBOL_GPL(__regmap_init);  static void devm_regmap_release(struct device *dev, void *res)  {  	regmap_exit(*(struct regmap **)res);  } -/** - * devm_regmap_init(): Initialise managed register map - * - * @dev: Device that will be interacted with - * @bus: Bus-specific callbacks to use with device - * @bus_context: Data passed to bus-specific callbacks - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap.  This function should generally not be called - * directly, it should be called by bus-specific init functions.  The - * map will be automatically freed by the device management code. 
- */
-struct regmap *devm_regmap_init(struct device *dev,
-				const struct regmap_bus *bus,
-				void *bus_context,
-				const struct regmap_config *config)
+struct regmap *__devm_regmap_init(struct device *dev,
+				  const struct regmap_bus *bus,
+				  void *bus_context,
+				  const struct regmap_config *config,
+				  struct lock_class_key *lock_key,
+				  const char *lock_name)
 {
 	struct regmap **ptr, *regmap;
@@ -930,7 +922,8 @@ struct regmap *devm_regmap_init(struct device *dev,
 	if (!ptr)
 		return ERR_PTR(-ENOMEM);
 
-	regmap = regmap_init(dev, bus, bus_context, config);
+	regmap = __regmap_init(dev, bus, bus_context, config,
+			       lock_key, lock_name);
 	if (!IS_ERR(regmap)) {
 		*ptr = regmap;
 		devres_add(dev, ptr);
@@ -940,7 +933,7 @@ struct regmap *devm_regmap_init(struct device *dev,
 	return regmap;
 }
-EXPORT_SYMBOL_GPL(devm_regmap_init);
+EXPORT_SYMBOL_GPL(__devm_regmap_init);
 
 static void regmap_field_init(struct regmap_field *rm_field,
 	struct regmap *regmap, struct reg_field reg_field)
@@ -1178,7 +1171,7 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
 		ret = _regmap_update_bits(map, range->selector_reg,
 					  range->selector_mask,
 					  win_page << range->selector_shift,
-					  &page_chg);
+					  &page_chg, false);
 
 	map->work_buf = orig_work_buf;
@@ -1382,10 +1375,33 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
  */
 bool regmap_can_raw_write(struct regmap *map)
 {
-	return map->bus && map->format.format_val && map->format.format_reg;
+	return map->bus && map->bus->write && map->format.format_val &&
+		map->format.format_reg;
 }
 EXPORT_SYMBOL_GPL(regmap_can_raw_write);
 
+/**
+ * regmap_get_raw_read_max - Get the maximum size we can read
+ *
+ * @map: Map to check.
+ */
+size_t regmap_get_raw_read_max(struct regmap *map)
+{
+	return map->max_raw_read;
+}
+EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
+
+/**
+ * regmap_get_raw_write_max - Get the maximum size we can write
+ *
+ * @map: Map to check.
+ */
+size_t regmap_get_raw_write_max(struct regmap *map)
+{
+	return map->max_raw_write;
+}
+EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
+
 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 				       unsigned int val)
 {
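For driver authors, the two getters above expose the bus transfer limits so callers can size raw I/O up front. A minimal sketch, assuming a hypothetical helper (example_read_block() and its clamping policy are invented for illustration, not part of this patch):

#include <linux/regmap.h>

/* Clamp a raw read to the bus limit reported by the regmap core.
 * A limit of zero means the bus declared no restriction.
 */
static int example_read_block(struct regmap *map, unsigned int reg,
			      void *buf, size_t len)
{
	size_t max = regmap_get_raw_read_max(map);

	if (max && len > max)
		len = max;

	return regmap_raw_read(map, reg, buf, len);
}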
@@ -1555,6 +1571,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 		return -EINVAL;
 	if (val_len % map->format.val_bytes)
 		return -EINVAL;
+	if (map->max_raw_write && map->max_raw_write < val_len)
+		return -E2BIG;
 
 	map->lock(map->lock_arg);
 
@@ -1624,6 +1642,18 @@ int regmap_fields_write(struct regmap_field *field, unsigned int id,
 }
 EXPORT_SYMBOL_GPL(regmap_fields_write);
 
+int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
+			unsigned int val)
+{
+	if (id >= field->id_size)
+		return -EINVAL;
+
+	return regmap_write_bits(field->regmap,
+				  field->reg + (field->id_offset * id),
+				  field->mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_force_write);
+
 /**
  * regmap_fields_update_bits():	Perform a read/modify/write cycle
  *                              on the register field
@@ -1669,6 +1699,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 {
 	int ret = 0, i;
 	size_t val_bytes = map->format.val_bytes;
+	size_t total_size = val_bytes * val_count;
 
 	if (map->bus && !map->format.parse_inplace)
 		return -EINVAL;
@@ -1677,9 +1708,15 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 
 	/*
 	 * Some devices don't support bulk write, for
-	 * them we have a series of single write operations.
+	 * them we have a series of single write operations in the first two if
+	 * blocks.
+	 *
+	 * The first if block is used for memory-mapped I/O; it does not allow
+	 * a val_bytes of 3, for example.
+	 * The second one is used for busses that do not have this limitation
+	 * and can write arbitrary value lengths.
 	 */
-	if (!map->bus || map->use_single_rw) {
+	if (!map->bus) {
 		map->lock(map->lock_arg);
 		for (i = 0; i < val_count; i++) {
 			unsigned int ival;
@@ -1711,6 +1748,38 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 		}
 out:
 		map->unlock(map->lock_arg);
+	} else if (map->use_single_write ||
+		   (map->max_raw_write && map->max_raw_write < total_size)) {
+		int chunk_stride = map->reg_stride;
+		size_t chunk_size = val_bytes;
+		size_t chunk_count = val_count;
+
+		if (!map->use_single_write) {
+			chunk_size = map->max_raw_write;
+			if (chunk_size % val_bytes)
+				chunk_size -= chunk_size % val_bytes;
+			chunk_count = total_size / chunk_size;
+			chunk_stride *= chunk_size / val_bytes;
+		}
+
+		map->lock(map->lock_arg);
+		/* Write as many bytes as possible with chunk_size */
+		for (i = 0; i < chunk_count; i++) {
+			ret = _regmap_raw_write(map,
+						reg + (i * chunk_stride),
+						val + (i * chunk_size),
+						chunk_size);
+			if (ret)
+				break;
+		}
+
+		/* Write remaining bytes */
+		if (!ret && chunk_size * i < total_size) {
+			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
+						val + (i * chunk_size),
+						total_size - i * chunk_size);
+		}
+		map->unlock(map->lock_arg);
 	} else {
 		void *wval;
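To make the chunking above concrete, a worked sketch (hypothetical device, invented numbers): bulk-writing eight 16-bit values through a bus whose max_raw_write is 6 bytes gives total_size = 16, chunk_size = 6 and chunk_count = 2, so the core issues two 6-byte raw writes followed by a 4-byte tail, all under a single lock:

#include <linux/kernel.h>
#include <linux/regmap.h>

static int example_bulk_write(struct regmap *map)
{
	/* With max_raw_write == 6 and val_bytes == 2 this becomes two
	 * 6-byte raw writes plus one 4-byte remainder write.
	 */
	static const u16 vals[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

	return regmap_bulk_write(map, 0x10, vals, ARRAY_SIZE(vals));
}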
@@ -1740,10 +1809,10 @@ EXPORT_SYMBOL_GPL(regmap_bulk_write);
  *
  * the (register,newvalue) pairs in regs have not been formatted, but
  * they are all in the same page and have been changed to being page
- * relative. The page register has been written if that was neccessary.
+ * relative. The page register has been written if that was necessary.
  */
 static int _regmap_raw_multi_reg_write(struct regmap *map,
-				       const struct reg_default *regs,
+				       const struct reg_sequence *regs,
 				       size_t num_regs)
 {
 	int ret;
@@ -1768,8 +1837,8 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
 	u8 = buf;
 
 	for (i = 0; i < num_regs; i++) {
-		int reg = regs[i].reg;
-		int val = regs[i].def;
+		unsigned int reg = regs[i].reg;
+		unsigned int val = regs[i].def;
 		trace_regmap_hw_write_start(map, reg, 1);
 		map->format.format_reg(u8, reg, map->reg_shift);
 		u8 += reg_bytes + pad_bytes;
@@ -1800,17 +1869,19 @@ static unsigned int _regmap_register_page(struct regmap *map,
 }
 
 static int _regmap_range_multi_paged_reg_write(struct regmap *map,
-					       struct reg_default *regs,
+					       struct reg_sequence *regs,
 					       size_t num_regs)
 {
 	int ret;
 	int i, n;
-	struct reg_default *base;
+	struct reg_sequence *base;
 	unsigned int this_page = 0;
+	unsigned int page_change = 0;
 	/*
 	 * the set of registers is not necessarily in order, but
 	 * since the order of write must be preserved this algorithm
-	 * chops the set each time the page changes
+	 * chops the set each time the page changes. This also applies
+	 * if there is a delay required at any point in the sequence.
 	 */
 	base = regs;
 	for (i = 0, n = 0; i < num_regs; i++, n++) {
@@ -1826,16 +1897,48 @@ static int _regmap_range_multi_paged_reg_write(struct regmap *map,
 				this_page = win_page;
 			if (win_page != this_page) {
 				this_page = win_page;
+				page_change = 1;
+			}
+		}
+
+		/* If we have both a page change and a delay, make sure to
+		 * write the regs and apply the delay before we change the
+		 * page.
+		 */
+
+		if (page_change || regs[i].delay_us) {
+
+				/* For situations where the first write requires
+				 * a delay, we need to make sure we don't call
+				 * raw_multi_reg_write with n=0.  This can't
+				 * occur with page breaks, as we never write on
+				 * the first iteration.
+				 */
+				if (regs[i].delay_us && i == 0)
+					n = 1;
+
 				ret = _regmap_raw_multi_reg_write(map, base, n);
 				if (ret != 0)
 					return ret;
+
+				if (regs[i].delay_us)
+					udelay(regs[i].delay_us);
+
 				base += n;
 				n = 0;
-			}
-			ret = _regmap_select_page(map, &base[n].reg, range, 1);
-			if (ret != 0)
-				return ret;
+
+				if (page_change) {
+					ret = _regmap_select_page(map,
+								  &base[n].reg,
+								  range, 1);
+					if (ret != 0)
+						return ret;
+
+					page_change = 0;
+				}
+
 		}
+
 	}
 	if (n > 0)
 		return _regmap_raw_multi_reg_write(map, base, n);
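For context, a sketch of how a driver might use the delay handling implemented above (registers, values, and delays are invented for illustration): any entry with a non-zero delay_us causes the writes accumulated so far to be flushed before the delay is applied.

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct reg_sequence example_init_seq[] = {
	{ .reg = 0x00, .def = 0x01, .delay_us = 100 },	/* soft reset, settle */
	{ .reg = 0x04, .def = 0xaa },
	{ .reg = 0x05, .def = 0x55 },
};

static int example_chip_init(struct regmap *map)
{
	return regmap_multi_reg_write(map, example_init_seq,
				      ARRAY_SIZE(example_init_seq));
}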
@@ -1843,7 +1946,7 @@ static int _regmap_range_multi_paged_reg_write(struct regmap *map,
 }
 
 static int _regmap_multi_reg_write(struct regmap *map,
-				   const struct reg_default *regs,
+				   const struct reg_sequence *regs,
 				   size_t num_regs)
 {
 	int i;
@@ -1854,6 +1957,9 @@ static int _regmap_multi_reg_write(struct regmap *map,
 			ret = _regmap_write(map, regs[i].reg, regs[i].def);
 			if (ret != 0)
 				return ret;
+
+			if (regs[i].delay_us)
+				udelay(regs[i].delay_us);
 		}
 		return 0;
 	}
@@ -1893,10 +1999,14 @@ static int _regmap_multi_reg_write(struct regmap *map,
 	for (i = 0; i < num_regs; i++) {
 		unsigned int reg = regs[i].reg;
 		struct regmap_range_node *range;
+
+		/* Coalesce all the writes between a page break or a delay
+		 * in a sequence
+		 */
 		range = _regmap_range_lookup(map, reg);
-		if (range) {
-			size_t len = sizeof(struct reg_default)*num_regs;
-			struct reg_default *base = kmemdup(regs, len,
+		if (range || regs[i].delay_us) {
+			size_t len = sizeof(struct reg_sequence)*num_regs;
+			struct reg_sequence *base = kmemdup(regs, len,
 							   GFP_KERNEL);
 			if (!base)
 				return -ENOMEM;
@@ -1929,7 +2039,7 @@ static int _regmap_multi_reg_write(struct regmap *map,
  * A value of zero will be returned on success, a negative errno will be
  * returned in error cases.
  */
-int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
 			   int num_regs)
 {
 	int ret;
@@ -1962,7 +2072,7 @@ EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
  * be returned in error cases.
  */
 int regmap_multi_reg_write_bypassed(struct regmap *map,
-				    const struct reg_default *regs,
+				    const struct reg_sequence *regs,
 				    int num_regs)
 {
 	int ret;
@@ -2050,7 +2160,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	/*
 	 * Some buses or devices flag reads by setting the high bits in the
-	 * register addresss; since it's always the high bits for all
+	 * register address; since it's always the high bits for all
 	 * current formats we can do this here rather than in
 	 * formatting.  This may break if we get interesting formats.
 	 */
@@ -2097,8 +2207,6 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
 	int ret;
 	void *context = _regmap_map_get_context(map);
 
-	WARN_ON(!map->reg_read);
-
 	if (!map->cache_bypass) {
 		ret = regcache_read(map, reg, val);
 		if (ret == 0)
@@ -2179,11 +2287,22 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 		return -EINVAL;
 	if (reg % map->reg_stride)
 		return -EINVAL;
+	if (val_count == 0)
+		return -EINVAL;
 
 	map->lock(map->lock_arg);
 
 	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
 	    map->cache_type == REGCACHE_NONE) {
+		if (!map->bus->read) {
+			ret = -ENOTSUPP;
+			goto out;
+		}
+		if (map->max_raw_read && map->max_raw_read < val_len) {
+			ret = -E2BIG;
+			goto out;
+		}
+
 		/* Physical block read if there's no cache involved */
 		ret = _regmap_raw_read(map, reg, val, val_len);
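The new up-front checks give callers explicit failure modes; a sketch of what that looks like from a driver (buffer size and limit are invented): a request larger than max_raw_read now fails with -E2BIG before any transfer is attempted, and a map whose bus lacks a read operation returns -ENOTSUPP instead of tripping the old WARN_ON.

#include <linux/regmap.h>

static int example_oversized_read(struct regmap *map)
{
	u8 buf[32];

	/* If the bus reported max_raw_read == 16 this fails with -E2BIG
	 * up front; on a bus without a read op it is -ENOTSUPP.
	 */
	return regmap_raw_read(map, 0x00, buf, sizeof(buf));
}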
@@ -2292,20 +2411,51 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 		 * Some devices do not support bulk read; for
 		 * them we have a series of single read operations.
 		 */
-		if (map->use_single_rw) {
-			for (i = 0; i < val_count; i++) {
-				ret = regmap_raw_read(map,
-						reg + (i * map->reg_stride),
-						val + (i * val_bytes),
-						val_bytes);
-				if (ret != 0)
-					return ret;
-			}
-		} else {
+		size_t total_size = val_bytes * val_count;
+
+		if (!map->use_single_read &&
+		    (!map->max_raw_read || map->max_raw_read > total_size)) {
 			ret = regmap_raw_read(map, reg, val,
 					      val_bytes * val_count);
 			if (ret != 0)
 				return ret;
+		} else {
+			/*
+			 * Some devices do not support bulk read or do not
+			 * support large bulk reads; for them we have a series
+			 * of read operations.
+			 */
+			int chunk_stride = map->reg_stride;
+			size_t chunk_size = val_bytes;
+			size_t chunk_count = val_count;
+
+			if (!map->use_single_read) {
+				chunk_size = map->max_raw_read;
+				if (chunk_size % val_bytes)
+					chunk_size -= chunk_size % val_bytes;
+				chunk_count = total_size / chunk_size;
+				chunk_stride *= chunk_size / val_bytes;
+			}
+
+			/* Read bytes that fit into a multiple of chunk_size */
+			for (i = 0; i < chunk_count; i++) {
+				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      chunk_size);
+				if (ret != 0)
+					return ret;
+			}
+
+			/* Read remaining bytes */
+			if (chunk_size * i < total_size) {
+				ret = regmap_raw_read(map,
+						      reg + (i * chunk_stride),
+						      val + (i * chunk_size),
+						      total_size - i * chunk_size);
+				if (ret != 0)
+					return ret;
+			}
 		}
 
 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
@@ -2317,7 +2467,34 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 					  &ival);
 			if (ret != 0)
 				return ret;
-			map->format.format_val(val + (i * val_bytes), ival, 0);
+
+			if (map->format.format_val) {
+				map->format.format_val(val + (i * val_bytes), ival, 0);
+			} else {
+				/* Devices providing read and write
+				 * operations can use the bulk I/O
+				 * functions if they define a val_bytes;
+				 * we assume that the values are native
+				 * endian.
+				 */
+				u32 *u32 = val;
+				u16 *u16 = val;
+				u8 *u8 = val;
+
+				switch (map->format.val_bytes) {
+				case 4:
+					u32[i] = ival;
+					break;
+				case 2:
+					u16[i] = ival;
+					break;
+				case 1:
+					u8[i] = ival;
+					break;
+				default:
+					return -EINVAL;
+				}
+			}
 		}
 	}
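A sketch of the fallback path above from the caller's side (hypothetical map with only reg_read/reg_write callbacks and val_bytes == 2): regmap_bulk_read() now fills the caller's buffer with native-endian values itself, so no format_val() implementation is needed.

#include <linux/regmap.h>

static int example_bulk_read_words(struct regmap *map, unsigned int reg,
				   u16 *out, size_t count)
{
	/* Each value is read via the map's reg_read callback and stored
	 * as a native-endian u16 because val_bytes == 2.
	 */
	return regmap_bulk_read(map, reg, out, count);
}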
@@ -2327,7 +2504,7 @@ EXPORT_SYMBOL_GPL(regmap_bulk_read);
 
 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 			       unsigned int mask, unsigned int val,
-			       bool *change)
+			       bool *change, bool force_write)
 {
 	int ret;
 	unsigned int tmp, orig;
@@ -2339,7 +2516,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 	tmp = orig & ~mask;
 	tmp |= val & mask;
 
-	if (tmp != orig) {
+	if (force_write || (tmp != orig)) {
 		ret = _regmap_write(map, reg, tmp);
 		if (change)
 			*change = true;
@@ -2367,7 +2544,7 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
 	int ret;
 
 	map->lock(map->lock_arg);
-	ret = _regmap_update_bits(map, reg, mask, val, NULL);
+	ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
 	map->unlock(map->lock_arg);
 
 	return ret;
@@ -2375,6 +2552,29 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
 EXPORT_SYMBOL_GPL(regmap_update_bits);
 
 /**
+ * regmap_write_bits: Perform a read/modify/write cycle on the register map,
+ *                    writing the register even if the value is unchanged
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_write_bits(struct regmap *map, unsigned int reg,
+		      unsigned int mask, unsigned int val)
+{
+	int ret;
+
+	map->lock(map->lock_arg);
+	ret = _regmap_update_bits(map, reg, mask, val, NULL, true);
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write_bits);
+
+/**
  * regmap_update_bits_async: Perform a read/modify/write cycle on the register
  *                           map asynchronously
 *
@@ -2398,7 +2598,7 @@ int regmap_update_bits_async(struct regmap *map, unsigned int reg,
 
 	map->async = true;
 
-	ret = _regmap_update_bits(map, reg, mask, val, NULL);
+	ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
 
 	map->async = false;
 
@@ -2427,7 +2627,7 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
 	int ret;
 
 	map->lock(map->lock_arg);
-	ret = _regmap_update_bits(map, reg, mask, val, change);
+	ret = _regmap_update_bits(map, reg, mask, val, change, false);
 	map->unlock(map->lock_arg);
 	return ret;
 }
@@ -2460,7 +2660,7 @@ int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
 
 	map->async = true;
 
-	ret = _regmap_update_bits(map, reg, mask, val, change);
+	ret = _regmap_update_bits(map, reg, mask, val, change, false);
 
 	map->async = false;
 
@@ -2552,10 +2752,10 @@ EXPORT_SYMBOL_GPL(regmap_async_complete);
  * The caller must ensure that this function cannot be called
  * concurrently with either itself or regcache_sync().
  */
-int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
 			  int num_regs)
 {
-	struct reg_default *p;
+	struct reg_sequence *p;
 	int ret;
 	bool bypass;
@@ -2564,7 +2764,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
 		return 0;
 
 	p = krealloc(map->patch,
-		     sizeof(struct reg_default) * (map->patch_regs + num_regs),
+		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
 		     GFP_KERNEL);
 	if (p) {
 		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));