diff options
Diffstat (limited to 'drivers/base/core.c')
| -rw-r--r-- | drivers/base/core.c | 27 | 
1 file changed, 20 insertions, 7 deletions
| diff --git a/drivers/base/core.c b/drivers/base/core.c index 42a672456432..dbb0f9130f42 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -718,6 +718,8 @@ static void __device_links_queue_sync_state(struct device *dev,  {  	struct device_link *link; +	if (!dev_has_sync_state(dev)) +		return;  	if (dev->state_synced)  		return; @@ -745,25 +747,31 @@ static void __device_links_queue_sync_state(struct device *dev,  /**   * device_links_flush_sync_list - Call sync_state() on a list of devices   * @list: List of devices to call sync_state() on + * @dont_lock_dev: Device for which lock is already held by the caller   *   * Calls sync_state() on all the devices that have been queued for it. This - * function is used in conjunction with __device_links_queue_sync_state(). + * function is used in conjunction with __device_links_queue_sync_state(). The + * @dont_lock_dev parameter is useful when this function is called from a + * context where a device lock is already held.   */ -static void device_links_flush_sync_list(struct list_head *list) +static void device_links_flush_sync_list(struct list_head *list, +					 struct device *dont_lock_dev)  {  	struct device *dev, *tmp;  	list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {  		list_del_init(&dev->links.defer_sync); -		device_lock(dev); +		if (dev != dont_lock_dev) +			device_lock(dev);  		if (dev->bus->sync_state)  			dev->bus->sync_state(dev);  		else if (dev->driver && dev->driver->sync_state)  			dev->driver->sync_state(dev); -		device_unlock(dev); +		if (dev != dont_lock_dev) +			device_unlock(dev);  		put_device(dev);  	} @@ -801,7 +809,7 @@ void device_links_supplier_sync_state_resume(void)  out:  	device_links_write_unlock(); -	device_links_flush_sync_list(&sync_list); +	device_links_flush_sync_list(&sync_list, NULL);  }  static int sync_state_resume_initcall(void) @@ -813,7 +821,7 @@ late_initcall(sync_state_resume_initcall);  static void __device_links_supplier_defer_sync(struct device 
*sup)  { -	if (list_empty(&sup->links.defer_sync)) +	if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))  		list_add_tail(&sup->links.defer_sync, &deferred_sync);  } @@ -865,6 +873,11 @@ void device_links_driver_bound(struct device *dev)  			driver_deferred_probe_add(link->consumer);  	} +	if (defer_sync_state_count) +		__device_links_supplier_defer_sync(dev); +	else +		__device_links_queue_sync_state(dev, &sync_list); +  	list_for_each_entry(link, &dev->links.suppliers, c_node) {  		if (!(link->flags & DL_FLAG_MANAGED))  			continue; @@ -883,7 +896,7 @@ void device_links_driver_bound(struct device *dev)  	device_links_write_unlock(); -	device_links_flush_sync_list(&sync_list); +	device_links_flush_sync_list(&sync_list, dev);  }  static void device_link_drop_managed(struct device_link *link) |