Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig                    2
-rw-r--r--  drivers/base/auxiliary.c               13
-rw-r--r--  drivers/base/base.h                     5
-rw-r--r--  drivers/base/bus.c                     19
-rw-r--r--  drivers/base/core.c                   209
-rw-r--r--  drivers/base/dd.c                       9
-rw-r--r--  drivers/base/devtmpfs.c                15
-rw-r--r--  drivers/base/init.c                     1
-rw-r--r--  drivers/base/isa.c                      2
-rw-r--r--  drivers/base/memory.c                  35
-rw-r--r--  drivers/base/node.c                    33
-rw-r--r--  drivers/base/platform.c                15
-rw-r--r--  drivers/base/power/clock_ops.c        223
-rw-r--r--  drivers/base/power/domain.c            89
-rw-r--r--  drivers/base/power/domain_governor.c  102
-rw-r--r--  drivers/base/power/main.c               9
-rw-r--r--  drivers/base/power/runtime.c            2
-rw-r--r--  drivers/base/property.c                15
-rw-r--r--  drivers/base/regmap/regcache.c          2
-rw-r--r--  drivers/base/regmap/regmap-sdw-mbq.c   10
-rw-r--r--  drivers/base/regmap/regmap-sdw.c        4
-rw-r--r--  drivers/base/swnode.c                 294
-rw-r--r--  drivers/base/test/Makefile              1
23 files changed, 883 insertions(+), 226 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 040be48ce046..324aa03fed3c 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -161,7 +161,7 @@ config HMEM_REPORTING default n depends on NUMA help - Enable reporting for heterogenous memory access attributes under + Enable reporting for heterogeneous memory access attributes under their non-uniform memory nodes. source "drivers/base/test/Kconfig" diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c index 8336535f1e11..d8b314e7d0fd 100644 --- a/drivers/base/auxiliary.c +++ b/drivers/base/auxiliary.c @@ -15,6 +15,7 @@ #include <linux/pm_runtime.h> #include <linux/string.h> #include <linux/auxiliary_bus.h> +#include "base.h" static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id, const struct auxiliary_device *auxdev) @@ -260,19 +261,11 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv) } EXPORT_SYMBOL_GPL(auxiliary_driver_unregister); -static int __init auxiliary_bus_init(void) +void __init auxiliary_bus_init(void) { - return bus_register(&auxiliary_bus_type); + WARN_ON(bus_register(&auxiliary_bus_type)); } -static void __exit auxiliary_bus_exit(void) -{ - bus_unregister(&auxiliary_bus_type); -} - -module_init(auxiliary_bus_init); -module_exit(auxiliary_bus_exit); - MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Auxiliary Bus"); MODULE_AUTHOR("David Ertman <david.m.ertman@intel.com>"); diff --git a/drivers/base/base.h b/drivers/base/base.h index f5600a83124f..52b3d7b75c27 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -119,6 +119,11 @@ static inline int hypervisor_init(void) { return 0; } extern int platform_bus_init(void); extern void cpu_dev_init(void); extern void container_dev_init(void); +#ifdef CONFIG_AUXILIARY_BUS +extern void auxiliary_bus_init(void); +#else +static inline void auxiliary_bus_init(void) { } +#endif struct kobject *virtual_device_parent(struct device *dev); diff --git a/drivers/base/bus.c b/drivers/base/bus.c index a9c23ecebc7c..36d0c654ea61 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -633,7 +633,7 @@ int bus_add_driver(struct device_driver *drv) error = driver_add_groups(drv, bus->drv_groups); if (error) { /* How the hell do we get out of this pickle? Give up */ - printk(KERN_ERR "%s: driver_create_groups(%s) failed\n", + printk(KERN_ERR "%s: driver_add_groups(%s) failed\n", __func__, drv->name); } @@ -729,23 +729,6 @@ int device_reprobe(struct device *dev) } EXPORT_SYMBOL_GPL(device_reprobe); -/** - * find_bus - locate bus by name. - * @name: name of bus. - * - * Call kset_find_obj() to iterate over list of buses to - * find a bus by name. Return bus if found. - * - * Note that kset_find_obj increments bus' reference count. - */ -#if 0 -struct bus_type *find_bus(char *name) -{ - struct kobject *k = kset_find_obj(bus_kset, name); - return k ? 
to_bus(k) : NULL; -} -#endif /* 0 */ - static int bus_add_groups(struct bus_type *bus, const struct attribute_group **groups) { diff --git a/drivers/base/core.c b/drivers/base/core.c index 14f165816742..f29839382f81 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -28,6 +28,7 @@ #include <linux/sched/signal.h> #include <linux/sched/mm.h> #include <linux/sysfs.h> +#include <linux/dma-map-ops.h> /* for dma_default_coherent */ #include "base.h" #include "power/power.h" @@ -148,6 +149,21 @@ void fwnode_links_purge(struct fwnode_handle *fwnode) fwnode_links_purge_consumers(fwnode); } +static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode) +{ + struct fwnode_handle *child; + + /* Don't purge consumer links of an added child */ + if (fwnode->dev) + return; + + fwnode->flags |= FWNODE_FLAG_NOT_DEVICE; + fwnode_links_purge_consumers(fwnode); + + fwnode_for_each_available_child_node(fwnode, child) + fw_devlink_purge_absent_suppliers(child); +} + #ifdef CONFIG_SRCU static DEFINE_MUTEX(device_links_lock); DEFINE_STATIC_SRCU(device_links_srcu); @@ -208,6 +224,16 @@ int device_links_read_lock_held(void) #endif #endif /* !CONFIG_SRCU */ +static bool device_is_ancestor(struct device *dev, struct device *target) +{ + while (target->parent) { + target = target->parent; + if (dev == target) + return true; + } + return false; +} + /** * device_is_dependent - Check if one device depends on another one * @dev: Device to check dependencies for. @@ -221,7 +247,12 @@ int device_is_dependent(struct device *dev, void *target) struct device_link *link; int ret; - if (dev == target) + /* + * The "ancestors" check is needed to catch the case when the target + * device has not been completely initialized yet and it is still + * missing from the list of children of its parent device. 
+ */ + if (dev == target || device_is_ancestor(dev, target)) return 1; ret = device_for_each_child(dev, target, device_is_dependent); @@ -229,7 +260,8 @@ int device_is_dependent(struct device *dev, void *target) return ret; list_for_each_entry(link, &dev->links.consumers, s_node) { - if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) + if ((link->flags & ~DL_FLAG_INFERRED) == + (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) continue; if (link->consumer == target) @@ -302,7 +334,8 @@ static int device_reorder_to_tail(struct device *dev, void *not_used) device_for_each_child(dev, NULL, device_reorder_to_tail); list_for_each_entry(link, &dev->links.consumers, s_node) { - if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) + if ((link->flags & ~DL_FLAG_INFERRED) == + (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) continue; device_reorder_to_tail(link->consumer, NULL); } @@ -456,7 +489,9 @@ static int devlink_add_symlinks(struct device *dev, struct device *con = link->consumer; char *buf; - len = max(strlen(dev_name(sup)), strlen(dev_name(con))); + len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)), + strlen(dev_bus_name(con)) + strlen(dev_name(con))); + len += strlen(":"); len += strlen("supplier:") + 1; buf = kzalloc(len, GFP_KERNEL); if (!buf) @@ -470,12 +505,12 @@ static int devlink_add_symlinks(struct device *dev, if (ret) goto err_con; - snprintf(buf, len, "consumer:%s", dev_name(con)); + snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf); if (ret) goto err_con_dev; - snprintf(buf, len, "supplier:%s", dev_name(sup)); + snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf); if (ret) goto err_sup_dev; @@ -483,7 +518,7 @@ static int devlink_add_symlinks(struct device *dev, goto out; err_sup_dev: - snprintf(buf, len, "consumer:%s", dev_name(con)); + snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); sysfs_remove_link(&sup->kobj, buf); err_con_dev: sysfs_remove_link(&link->link_dev.kobj, "consumer"); @@ -506,7 +541,9 @@ static void devlink_remove_symlinks(struct device *dev, sysfs_remove_link(&link->link_dev.kobj, "consumer"); sysfs_remove_link(&link->link_dev.kobj, "supplier"); - len = max(strlen(dev_name(sup)), strlen(dev_name(con))); + len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)), + strlen(dev_bus_name(con)) + strlen(dev_name(con))); + len += strlen(":"); len += strlen("supplier:") + 1; buf = kzalloc(len, GFP_KERNEL); if (!buf) { @@ -514,9 +551,9 @@ static void devlink_remove_symlinks(struct device *dev, return; } - snprintf(buf, len, "supplier:%s", dev_name(sup)); + snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); sysfs_remove_link(&con->kobj, buf); - snprintf(buf, len, "consumer:%s", dev_name(con)); + snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); sysfs_remove_link(&sup->kobj, buf); kfree(buf); } @@ -546,7 +583,8 @@ postcore_initcall(devlink_class_init); #define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \ DL_FLAG_AUTOREMOVE_SUPPLIER | \ DL_FLAG_AUTOPROBE_CONSUMER | \ - DL_FLAG_SYNC_STATE_ONLY) + DL_FLAG_SYNC_STATE_ONLY | \ + DL_FLAG_INFERRED) #define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE) @@ -615,7 +653,7 @@ struct device_link *device_link_add(struct device *consumer, if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS || (flags & 
DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) || (flags & DL_FLAG_SYNC_STATE_ONLY && - flags != DL_FLAG_SYNC_STATE_ONLY) || + (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) || (flags & DL_FLAG_AUTOPROBE_CONSUMER && flags & (DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_AUTOREMOVE_SUPPLIER))) @@ -671,6 +709,10 @@ struct device_link *device_link_add(struct device *consumer, if (link->consumer != consumer) continue; + if (link->flags & DL_FLAG_INFERRED && + !(flags & DL_FLAG_INFERRED)) + link->flags &= ~DL_FLAG_INFERRED; + if (flags & DL_FLAG_PM_RUNTIME) { if (!(link->flags & DL_FLAG_PM_RUNTIME)) { pm_runtime_new_link(consumer); @@ -737,8 +779,9 @@ struct device_link *device_link_add(struct device *consumer, link->link_dev.class = &devlink_class; device_set_pm_not_required(&link->link_dev); - dev_set_name(&link->link_dev, "%s--%s", - dev_name(supplier), dev_name(consumer)); + dev_set_name(&link->link_dev, "%s:%s--%s:%s", + dev_bus_name(supplier), dev_name(supplier), + dev_bus_name(consumer), dev_name(consumer)); if (device_register(&link->link_dev)) { put_device(consumer); put_device(supplier); @@ -929,6 +972,10 @@ int device_links_check_suppliers(struct device *dev) mutex_lock(&fwnode_link_lock); if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) && !fw_devlink_is_permissive()) { + dev_dbg(dev, "probe deferral - wait for supplier %pfwP\n", + list_first_entry(&dev->fwnode->suppliers, + struct fwnode_link, + c_hook)->supplier); mutex_unlock(&fwnode_link_lock); return -EPROBE_DEFER; } @@ -943,6 +990,8 @@ int device_links_check_suppliers(struct device *dev) if (link->status != DL_STATE_AVAILABLE && !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) { device_links_missing_supplier(dev); + dev_dbg(dev, "probe deferral - supplier %s not ready\n", + dev_name(link->supplier)); ret = -EPROBE_DEFER; break; } @@ -1121,12 +1170,22 @@ void device_links_driver_bound(struct device *dev) LIST_HEAD(sync_list); /* - * If a device probes successfully, it's expected to have created all + * If a device binds successfully, it's expected to have created all * the device links it needs to or make new device links as it needs - * them. So, it no longer needs to wait on any suppliers. + * them. So, fw_devlink no longer needs to create device links to any + * of the device's suppliers. + * + * Also, if a child firmware node of this bound device is not added as + * a device by now, assume it is never going to be added and make sure + * other devices don't defer probe indefinitely by waiting for such a + * child device. 
*/ - if (dev->fwnode && dev->fwnode->dev == dev) + if (dev->fwnode && dev->fwnode->dev == dev) { + struct fwnode_handle *child; fwnode_links_purge_suppliers(dev->fwnode); + fwnode_for_each_available_child_node(dev->fwnode, child) + fw_devlink_purge_absent_suppliers(child); + } device_remove_file(dev, &dev_attr_waiting_for_supplier); device_links_write_lock(); @@ -1437,7 +1496,14 @@ static void device_links_purge(struct device *dev) device_links_write_unlock(); } -static u32 fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY; +#define FW_DEVLINK_FLAGS_PERMISSIVE (DL_FLAG_INFERRED | \ + DL_FLAG_SYNC_STATE_ONLY) +#define FW_DEVLINK_FLAGS_ON (DL_FLAG_INFERRED | \ + DL_FLAG_AUTOPROBE_CONSUMER) +#define FW_DEVLINK_FLAGS_RPM (FW_DEVLINK_FLAGS_ON | \ + DL_FLAG_PM_RUNTIME) + +static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE; static int __init fw_devlink_setup(char *arg) { if (!arg) @@ -1446,17 +1512,23 @@ static int __init fw_devlink_setup(char *arg) if (strcmp(arg, "off") == 0) { fw_devlink_flags = 0; } else if (strcmp(arg, "permissive") == 0) { - fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY; + fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE; } else if (strcmp(arg, "on") == 0) { - fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER; + fw_devlink_flags = FW_DEVLINK_FLAGS_ON; } else if (strcmp(arg, "rpm") == 0) { - fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER | - DL_FLAG_PM_RUNTIME; + fw_devlink_flags = FW_DEVLINK_FLAGS_RPM; } return 0; } early_param("fw_devlink", fw_devlink_setup); +static bool fw_devlink_strict; +static int __init fw_devlink_strict_setup(char *arg) +{ + return strtobool(arg, &fw_devlink_strict); +} +early_param("fw_devlink.strict", fw_devlink_strict_setup); + u32 fw_devlink_get_flags(void) { return fw_devlink_flags; @@ -1464,7 +1536,12 @@ u32 fw_devlink_get_flags(void) static bool fw_devlink_is_permissive(void) { - return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY; + return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE; +} + +bool fw_devlink_is_strict(void) +{ + return fw_devlink_strict && !fw_devlink_is_permissive(); } static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode) @@ -1487,6 +1564,53 @@ static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode) } /** + * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links + * @con: Device to check dependencies for. + * @sup: Device to check against. + * + * Check if @sup depends on @con or any device dependent on it (its child or + * its consumer etc). When such a cyclic dependency is found, convert all + * device links created solely by fw_devlink into SYNC_STATE_ONLY device links. + * This is the equivalent of doing fw_devlink=permissive just between the + * devices in the cycle. We need to do this because, at this point, fw_devlink + * can't tell which of these dependencies is not a real dependency. + * + * Return 1 if a cycle is found. Otherwise, return 0. 
+ */ +static int fw_devlink_relax_cycle(struct device *con, void *sup) +{ + struct device_link *link; + int ret; + + if (con == sup) + return 1; + + ret = device_for_each_child(con, sup, fw_devlink_relax_cycle); + if (ret) + return ret; + + list_for_each_entry(link, &con->links.consumers, s_node) { + if ((link->flags & ~DL_FLAG_INFERRED) == + (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) + continue; + + if (!fw_devlink_relax_cycle(link->consumer, sup)) + continue; + + ret = 1; + + if (!(link->flags & DL_FLAG_INFERRED)) + continue; + + pm_runtime_drop_link(link); + link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE; + dev_dbg(link->consumer, "Relaxing link with %s\n", + dev_name(link->supplier)); + } + return ret; +} + +/** * fw_devlink_create_devlink - Create a device link from a consumer to fwnode * @con - Consumer device for the device link * @sup_handle - fwnode handle of supplier @@ -1514,15 +1638,39 @@ static int fw_devlink_create_devlink(struct device *con, sup_dev = get_dev_from_fwnode(sup_handle); if (sup_dev) { /* + * If it's one of those drivers that don't actually bind to + * their device using driver core, then don't wait on this + * supplier device indefinitely. + */ + if (sup_dev->links.status == DL_DEV_NO_DRIVER && + sup_handle->flags & FWNODE_FLAG_INITIALIZED) { + ret = -EINVAL; + goto out; + } + + /* * If this fails, it is due to cycles in device links. Just * give up on this link and treat it as invalid. */ - if (!device_link_add(con, sup_dev, flags)) + if (!device_link_add(con, sup_dev, flags) && + !(flags & DL_FLAG_SYNC_STATE_ONLY)) { + dev_info(con, "Fixing up cyclic dependency with %s\n", + dev_name(sup_dev)); + device_links_write_lock(); + fw_devlink_relax_cycle(con, sup_dev); + device_links_write_unlock(); + device_link_add(con, sup_dev, + FW_DEVLINK_FLAGS_PERMISSIVE); ret = -EINVAL; + } goto out; } + /* Supplier that's already initialized without a struct device. */ + if (sup_handle->flags & FWNODE_FLAG_INITIALIZED) + return -EINVAL; + /* * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports * cycles. So cycle detection isn't necessary and shouldn't be @@ -1611,7 +1759,7 @@ static void __fw_devlink_link_to_consumers(struct device *dev) con_dev = NULL; } else { own_link = false; - dl_flags = DL_FLAG_SYNC_STATE_ONLY; + dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE; } } @@ -1666,7 +1814,7 @@ static void __fw_devlink_link_to_suppliers(struct device *dev, if (own_link) dl_flags = fw_devlink_get_flags(); else - dl_flags = DL_FLAG_SYNC_STATE_ONLY; + dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE; list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) { int ret; @@ -1808,9 +1956,7 @@ const char *dev_driver_string(const struct device *dev) * never change once they are set, so they don't need special care. */ drv = READ_ONCE(dev->driver); - return drv ? drv->name : - (dev->bus ? dev->bus->name : - (dev->class ? dev->class->name : "")); + return drv ? 
drv->name : dev_bus_name(dev); } EXPORT_SYMBOL(dev_driver_string); @@ -2585,6 +2731,11 @@ void device_initialize(struct device *dev) INIT_LIST_HEAD(&dev->links.suppliers); INIT_LIST_HEAD(&dev->links.defer_sync); dev->links.status = DL_DEV_NO_DRIVER; +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) + dev->dma_coherent = dma_default_coherent; +#endif } EXPORT_SYMBOL_GPL(device_initialize); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 2f32f38a11ed..9179825ff646 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -371,13 +371,6 @@ static void driver_bound(struct device *dev) device_pm_check_callbacks(dev); /* - * Reorder successfully probed devices to the end of the device list. - * This ensures that suspend/resume order matches probe order, which - * is usually what drivers rely on. - */ - device_pm_move_to_tail(dev); - - /* * Make sure the device is no longer in one of the deferred lists and * kick off retrying all pending devices */ @@ -619,6 +612,8 @@ dev_groups_failed: else if (drv->remove) drv->remove(dev); probe_failed: + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_DRIVER_NOT_BOUND, dev); diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index eac184e6d657..653c8c6ac7a7 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -162,7 +162,7 @@ static int dev_mkdir(const char *name, umode_t mode) if (IS_ERR(dentry)) return PTR_ERR(dentry); - err = vfs_mkdir(d_inode(path.dentry), dentry, mode); + err = vfs_mkdir(&init_user_ns, d_inode(path.dentry), dentry, mode); if (!err) /* mark as kernel-created inode */ d_inode(dentry)->i_private = &thread; @@ -212,7 +212,8 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid, if (IS_ERR(dentry)) return PTR_ERR(dentry); - err = vfs_mknod(d_inode(path.dentry), dentry, mode, dev->devt); + err = vfs_mknod(&init_user_ns, d_inode(path.dentry), dentry, mode, + dev->devt); if (!err) { struct iattr newattrs; @@ -221,7 +222,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid, newattrs.ia_gid = gid; newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID; inode_lock(d_inode(dentry)); - notify_change(dentry, &newattrs, NULL); + notify_change(&init_user_ns, dentry, &newattrs, NULL); inode_unlock(d_inode(dentry)); /* mark as kernel-created inode */ @@ -242,7 +243,8 @@ static int dev_rmdir(const char *name) return PTR_ERR(dentry); if (d_really_is_positive(dentry)) { if (d_inode(dentry)->i_private == &thread) - err = vfs_rmdir(d_inode(parent.dentry), dentry); + err = vfs_rmdir(&init_user_ns, d_inode(parent.dentry), + dentry); else err = -EPERM; } else { @@ -328,9 +330,10 @@ static int handle_remove(const char *nodename, struct device *dev) newattrs.ia_valid = ATTR_UID|ATTR_GID|ATTR_MODE; inode_lock(d_inode(dentry)); - notify_change(dentry, &newattrs, NULL); + notify_change(&init_user_ns, dentry, &newattrs, NULL); inode_unlock(d_inode(dentry)); - err = vfs_unlink(d_inode(parent.dentry), dentry, NULL); + err = vfs_unlink(&init_user_ns, d_inode(parent.dentry), + dentry, NULL); if (!err || err == -ENOENT) deleted = 1; } diff --git a/drivers/base/init.c b/drivers/base/init.c index 908e6520e804..a9f57c22fb9e 100644 --- a/drivers/base/init.c +++ b/drivers/base/init.c @@ -32,6 +32,7 @@ void __init driver_init(void) */ of_core_init(); platform_bus_init(); + auxiliary_bus_init(); cpu_dev_init(); 
memory_dev_init(); container_dev_init(); diff --git a/drivers/base/isa.c b/drivers/base/isa.c index 2772f5d1948a..aa4737667026 100644 --- a/drivers/base/isa.c +++ b/drivers/base/isa.c @@ -51,7 +51,7 @@ static int isa_bus_remove(struct device *dev) struct isa_driver *isa_driver = dev->platform_data; if (isa_driver && isa_driver->remove) - return isa_driver->remove(dev, to_isa_dev(dev)->id); + isa_driver->remove(dev, to_isa_dev(dev)->id); return 0; } diff --git a/drivers/base/memory.c b/drivers/base/memory.c index eef4ffb6122c..f35298425575 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -35,7 +35,7 @@ static const char *const online_type_to_str[] = { [MMOP_ONLINE_MOVABLE] = "online_movable", }; -int memhp_online_type_from_str(const char *str) +int mhp_online_type_from_str(const char *str) { int i; @@ -253,7 +253,7 @@ static int memory_subsys_offline(struct device *dev) static ssize_t state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - const int online_type = memhp_online_type_from_str(buf); + const int online_type = mhp_online_type_from_str(buf); struct memory_block *mem = to_memory_block(dev); int ret; @@ -290,20 +290,20 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, } /* - * phys_device is a bad name for this. What I really want - * is a way to differentiate between memory ranges that - * are part of physical devices that constitute - * a complete removable unit or fru. - * i.e. do these ranges belong to the same physical device, - * s.t. if I offline all of these sections I can then - * remove the physical device? + * Legacy interface that we cannot remove: s390x exposes the storage increment + * covered by a memory block, allowing for identifying which memory blocks + * comprise a storage increment. Since a memory block spans complete + * storage increments nowadays, this interface is basically unused. Other + * archs never exposed != 0. */ static ssize_t phys_device_show(struct device *dev, struct device_attribute *attr, char *buf) { struct memory_block *mem = to_memory_block(dev); + unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); - return sysfs_emit(buf, "%d\n", mem->phys_device); + return sysfs_emit(buf, "%d\n", + arch_get_memory_phys_device(start_pfn)); } #ifdef CONFIG_MEMORY_HOTREMOVE @@ -387,19 +387,19 @@ static ssize_t auto_online_blocks_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", - online_type_to_str[memhp_default_online_type]); + online_type_to_str[mhp_default_online_type]); } static ssize_t auto_online_blocks_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - const int online_type = memhp_online_type_from_str(buf); + const int online_type = mhp_online_type_from_str(buf); if (online_type < 0) return -EINVAL; - memhp_default_online_type = online_type; + mhp_default_online_type = online_type; return count; } @@ -488,11 +488,7 @@ static DEVICE_ATTR_WO(soft_offline_page); static DEVICE_ATTR_WO(hard_offline_page); #endif -/* - * Note that phys_device is optional. It is here to allow for - * differentiation between which *physical* devices each - * section belongs to... - */ +/* See phys_device_show(). 
*/ int __weak arch_get_memory_phys_device(unsigned long start_pfn) { return 0; @@ -574,7 +570,6 @@ int register_memory(struct memory_block *memory) static int init_memory_block(unsigned long block_id, unsigned long state) { struct memory_block *mem; - unsigned long start_pfn; int ret = 0; mem = find_memory_block_by_id(block_id); @@ -588,8 +583,6 @@ static int init_memory_block(unsigned long block_id, unsigned long state) mem->start_section_nr = block_id * sections_per_block; mem->state = state; - start_pfn = section_nr_to_pfn(mem->start_section_nr); - mem->phys_device = arch_get_memory_phys_device(start_pfn); mem->nid = NUMA_NO_NODE; ret = register_memory(mem); diff --git a/drivers/base/node.c b/drivers/base/node.c index 04f71c7bc3f8..f449dbb2c746 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -372,14 +372,19 @@ static ssize_t node_read_meminfo(struct device *dev, struct pglist_data *pgdat = NODE_DATA(nid); struct sysinfo i; unsigned long sreclaimable, sunreclaimable; + unsigned long swapcached = 0; si_meminfo_node(&i, nid); sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B); sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B); +#ifdef CONFIG_SWAP + swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE); +#endif len = sysfs_emit_at(buf, len, "Node %d MemTotal: %8lu kB\n" "Node %d MemFree: %8lu kB\n" "Node %d MemUsed: %8lu kB\n" + "Node %d SwapCached: %8lu kB\n" "Node %d Active: %8lu kB\n" "Node %d Inactive: %8lu kB\n" "Node %d Active(anon): %8lu kB\n" @@ -391,6 +396,7 @@ static ssize_t node_read_meminfo(struct device *dev, nid, K(i.totalram), nid, K(i.freeram), nid, K(i.totalram - i.freeram), + nid, K(swapcached), nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) + node_page_state(pgdat, NR_ACTIVE_FILE)), nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) + @@ -461,16 +467,11 @@ static ssize_t node_read_meminfo(struct device *dev, nid, K(sunreclaimable) #ifdef CONFIG_TRANSPARENT_HUGEPAGE , - nid, K(node_page_state(pgdat, NR_ANON_THPS) * - HPAGE_PMD_NR), - nid, K(node_page_state(pgdat, NR_SHMEM_THPS) * - HPAGE_PMD_NR), - nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) * - HPAGE_PMD_NR), - nid, K(node_page_state(pgdat, NR_FILE_THPS) * - HPAGE_PMD_NR), - nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED) * - HPAGE_PMD_NR) + nid, K(node_page_state(pgdat, NR_ANON_THPS)), + nid, K(node_page_state(pgdat, NR_SHMEM_THPS)), + nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)), + nid, K(node_page_state(pgdat, NR_FILE_THPS)), + nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED)) #endif ); len += hugetlb_report_node_meminfo(buf, len, nid); @@ -519,10 +520,14 @@ static ssize_t node_read_vmstat(struct device *dev, sum_zone_numa_state(nid, i)); #endif - for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) - len += sysfs_emit_at(buf, len, "%s %lu\n", - node_stat_name(i), - node_page_state_pages(pgdat, i)); + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { + unsigned long pages = node_page_state_pages(pgdat, i); + + if (vmstat_item_print_in_thp(i)) + pages /= HPAGE_PMD_NR; + len += sysfs_emit_at(buf, len, "%s %lu\n", node_stat_name(i), + pages); + } return len; } diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 95fd1549f87d..6e1f8e0b661c 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -366,6 +366,8 @@ int devm_platform_get_irqs_affinity(struct platform_device *dev, return -ERANGE; nvec = platform_irq_count(dev); + if (nvec < 0) + return nvec; if (nvec < minvec) return -ENOSPC; @@ -571,7 +573,7 @@ static void 
platform_device_release(struct device *dev) struct platform_object *pa = container_of(dev, struct platform_object, pdev.dev); - of_device_node_put(&pa->pdev.dev); + of_node_put(pa->pdev.dev.of_node); kfree(pa->pdev.dev.platform_data); kfree(pa->pdev.mfd_cell); kfree(pa->pdev.resource); @@ -1461,13 +1463,16 @@ static int platform_remove(struct device *_dev) { struct platform_driver *drv = to_platform_driver(_dev->driver); struct platform_device *dev = to_platform_device(_dev); - int ret = 0; - if (drv->remove) - ret = drv->remove(dev); + if (drv->remove) { + int ret = drv->remove(dev); + + if (ret) + dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n"); + } dev_pm_domain_detach(_dev, true); - return ret; + return 0; } static void platform_shutdown(struct device *_dev) diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index ced6863a16a5..84d5acb6301b 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -23,6 +23,7 @@ enum pce_status { PCE_STATUS_NONE = 0, PCE_STATUS_ACQUIRED, + PCE_STATUS_PREPARED, PCE_STATUS_ENABLED, PCE_STATUS_ERROR, }; @@ -32,9 +33,113 @@ struct pm_clock_entry { char *con_id; struct clk *clk; enum pce_status status; + bool enabled_when_prepared; }; /** + * pm_clk_list_lock - ensure exclusive access for modifying the PM clock + * entry list. + * @psd: pm_subsys_data instance corresponding to the PM clock entry list + * and clk_op_might_sleep count to be modified. + * + * Get exclusive access before modifying the PM clock entry list and the + * clock_op_might_sleep count to guard against concurrent modifications. + * This also protects against a concurrent clock_op_might_sleep and PM clock + * entry list usage in pm_clk_suspend()/pm_clk_resume() that may or may not + * happen in atomic context, hence both the mutex and the spinlock must be + * taken here. + */ +static void pm_clk_list_lock(struct pm_subsys_data *psd) + __acquires(&psd->lock) +{ + mutex_lock(&psd->clock_mutex); + spin_lock_irq(&psd->lock); +} + +/** + * pm_clk_list_unlock - counterpart to pm_clk_list_lock(). + * @psd: the same pm_subsys_data instance previously passed to + * pm_clk_list_lock(). + */ +static void pm_clk_list_unlock(struct pm_subsys_data *psd) + __releases(&psd->lock) +{ + spin_unlock_irq(&psd->lock); + mutex_unlock(&psd->clock_mutex); +} + +/** + * pm_clk_op_lock - ensure exclusive access for performing clock operations. + * @psd: pm_subsys_data instance corresponding to the PM clock entry list + * and clk_op_might_sleep count being used. + * @flags: stored irq flags. + * @fn: string for the caller function's name. + * + * This is used by pm_clk_suspend() and pm_clk_resume() to guard + * against concurrent modifications to the clock entry list and the + * clock_op_might_sleep count. If clock_op_might_sleep is != 0 then + * only the mutex can be locked and those functions can only be used in + * non atomic context. If clock_op_might_sleep == 0 then these functions + * may be used in any context and only the spinlock can be locked. + * Returns -EINVAL if called in atomic context when clock ops might sleep. 
+ */ +static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags, + const char *fn) + /* sparse annotations don't work here as exit state isn't static */ +{ + bool atomic_context = in_atomic() || irqs_disabled(); + +try_again: + spin_lock_irqsave(&psd->lock, *flags); + if (!psd->clock_op_might_sleep) { + /* the __release is there to work around sparse limitations */ + __release(&psd->lock); + return 0; + } + + /* bail out if in atomic context */ + if (atomic_context) { + pr_err("%s: atomic context with clock_ops_might_sleep = %d", + fn, psd->clock_op_might_sleep); + spin_unlock_irqrestore(&psd->lock, *flags); + might_sleep(); + return -EPERM; + } + + /* we must switch to the mutex */ + spin_unlock_irqrestore(&psd->lock, *flags); + mutex_lock(&psd->clock_mutex); + + /* + * There was a possibility for psd->clock_op_might_sleep + * to become 0 above. Keep the mutex only if not the case. + */ + if (likely(psd->clock_op_might_sleep)) + return 0; + + mutex_unlock(&psd->clock_mutex); + goto try_again; +} + +/** + * pm_clk_op_unlock - counterpart to pm_clk_op_lock(). + * @psd: the same pm_subsys_data instance previously passed to + * pm_clk_op_lock(). + * @flags: irq flags provided by pm_clk_op_lock(). + */ +static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags) + /* sparse annotations don't work here as entry state isn't static */ +{ + if (psd->clock_op_might_sleep) { + mutex_unlock(&psd->clock_mutex); + } else { + /* the __acquire is there to work around sparse limitations */ + __acquire(&psd->lock); + spin_unlock_irqrestore(&psd->lock, *flags); + } +} + +/** * pm_clk_enable - Enable a clock, reporting any errors * @dev: The device for the given clock * @ce: PM clock entry corresponding to the clock. @@ -43,14 +148,21 @@ static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce { int ret; - if (ce->status < PCE_STATUS_ERROR) { + switch (ce->status) { + case PCE_STATUS_ACQUIRED: + ret = clk_prepare_enable(ce->clk); + break; + case PCE_STATUS_PREPARED: ret = clk_enable(ce->clk); - if (!ret) - ce->status = PCE_STATUS_ENABLED; - else - dev_err(dev, "%s: failed to enable clk %p, error %d\n", - __func__, ce->clk, ret); + break; + default: + return; } + if (!ret) + ce->status = PCE_STATUS_ENABLED; + else + dev_err(dev, "%s: failed to enable clk %p, error %d\n", + __func__, ce->clk, ret); } /** @@ -64,17 +176,20 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) ce->clk = clk_get(dev, ce->con_id); if (IS_ERR(ce->clk)) { ce->status = PCE_STATUS_ERROR; + return; + } else if (clk_is_enabled_when_prepared(ce->clk)) { + /* we defer preparing the clock in that case */ + ce->status = PCE_STATUS_ACQUIRED; + ce->enabled_when_prepared = true; + } else if (clk_prepare(ce->clk)) { + ce->status = PCE_STATUS_ERROR; + dev_err(dev, "clk_prepare() failed\n"); + return; } else { - if (clk_prepare(ce->clk)) { - ce->status = PCE_STATUS_ERROR; - dev_err(dev, "clk_prepare() failed\n"); - } else { - ce->status = PCE_STATUS_ACQUIRED; - dev_dbg(dev, - "Clock %pC con_id %s managed by runtime PM.\n", - ce->clk, ce->con_id); - } + ce->status = PCE_STATUS_PREPARED; } + dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n", + ce->clk, ce->con_id); } static int __pm_clk_add(struct device *dev, const char *con_id, @@ -106,9 +221,11 @@ static int __pm_clk_add(struct device *dev, const char *con_id, pm_clk_acquire(dev, ce); - spin_lock_irq(&psd->lock); + pm_clk_list_lock(psd); list_add_tail(&ce->node, &psd->clock_list); - 
spin_unlock_irq(&psd->lock); + if (ce->enabled_when_prepared) + psd->clock_op_might_sleep++; + pm_clk_list_unlock(psd); return 0; } @@ -239,14 +356,20 @@ static void __pm_clk_remove(struct pm_clock_entry *ce) if (!ce) return; - if (ce->status < PCE_STATUS_ERROR) { - if (ce->status == PCE_STATUS_ENABLED) - clk_disable(ce->clk); - - if (ce->status >= PCE_STATUS_ACQUIRED) { - clk_unprepare(ce->clk); + switch (ce->status) { + case PCE_STATUS_ENABLED: + clk_disable(ce->clk); + fallthrough; + case PCE_STATUS_PREPARED: + clk_unprepare(ce->clk); + fallthrough; + case PCE_STATUS_ACQUIRED: + case PCE_STATUS_ERROR: + if (!IS_ERR(ce->clk)) clk_put(ce->clk); - } + break; + default: + break; } kfree(ce->con_id); @@ -269,7 +392,7 @@ void pm_clk_remove(struct device *dev, const char *con_id) if (!psd) return; - spin_lock_irq(&psd->lock); + pm_clk_list_lock(psd); list_for_each_entry(ce, &psd->clock_list, node) { if (!con_id && !ce->con_id) @@ -280,12 +403,14 @@ void pm_clk_remove(struct device *dev, const char *con_id) goto remove; } - spin_unlock_irq(&psd->lock); + pm_clk_list_unlock(psd); return; remove: list_del(&ce->node); - spin_unlock_irq(&psd->lock); + if (ce->enabled_when_prepared) + psd->clock_op_might_sleep--; + pm_clk_list_unlock(psd); __pm_clk_remove(ce); } @@ -307,19 +432,21 @@ void pm_clk_remove_clk(struct device *dev, struct clk *clk) if (!psd || !clk) return; - spin_lock_irq(&psd->lock); + pm_clk_list_lock(psd); list_for_each_entry(ce, &psd->clock_list, node) { if (clk == ce->clk) goto remove; } - spin_unlock_irq(&psd->lock); + pm_clk_list_unlock(psd); return; remove: list_del(&ce->node); - spin_unlock_irq(&psd->lock); + if (ce->enabled_when_prepared) + psd->clock_op_might_sleep--; + pm_clk_list_unlock(psd); __pm_clk_remove(ce); } @@ -330,13 +457,16 @@ EXPORT_SYMBOL_GPL(pm_clk_remove_clk); * @dev: Device to initialize the list of PM clocks for. * * Initialize the lock and clock_list members of the device's pm_subsys_data - * object. + * object, set the count of clocks that might sleep to 0. 
*/ void pm_clk_init(struct device *dev) { struct pm_subsys_data *psd = dev_to_psd(dev); - if (psd) + if (psd) { INIT_LIST_HEAD(&psd->clock_list); + mutex_init(&psd->clock_mutex); + psd->clock_op_might_sleep = 0; + } } EXPORT_SYMBOL_GPL(pm_clk_init); @@ -372,12 +502,13 @@ void pm_clk_destroy(struct device *dev) INIT_LIST_HEAD(&list); - spin_lock_irq(&psd->lock); + pm_clk_list_lock(psd); list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node) list_move(&ce->node, &list); + psd->clock_op_might_sleep = 0; - spin_unlock_irq(&psd->lock); + pm_clk_list_unlock(psd); dev_pm_put_subsys_data(dev); @@ -397,23 +528,30 @@ int pm_clk_suspend(struct device *dev) struct pm_subsys_data *psd = dev_to_psd(dev); struct pm_clock_entry *ce; unsigned long flags; + int ret; dev_dbg(dev, "%s()\n", __func__); if (!psd) return 0; - spin_lock_irqsave(&psd->lock, flags); + ret = pm_clk_op_lock(psd, &flags, __func__); + if (ret) + return ret; list_for_each_entry_reverse(ce, &psd->clock_list, node) { - if (ce->status < PCE_STATUS_ERROR) { - if (ce->status == PCE_STATUS_ENABLED) + if (ce->status == PCE_STATUS_ENABLED) { + if (ce->enabled_when_prepared) { + clk_disable_unprepare(ce->clk); + ce->status = PCE_STATUS_ACQUIRED; + } else { clk_disable(ce->clk); - ce->status = PCE_STATUS_ACQUIRED; + ce->status = PCE_STATUS_PREPARED; + } } } - spin_unlock_irqrestore(&psd->lock, flags); + pm_clk_op_unlock(psd, &flags); return 0; } @@ -428,18 +566,21 @@ int pm_clk_resume(struct device *dev) struct pm_subsys_data *psd = dev_to_psd(dev); struct pm_clock_entry *ce; unsigned long flags; + int ret; dev_dbg(dev, "%s()\n", __func__); if (!psd) return 0; - spin_lock_irqsave(&psd->lock, flags); + ret = pm_clk_op_lock(psd, &flags, __func__); + if (ret) + return ret; list_for_each_entry(ce, &psd->clock_list, node) __pm_clk_enable(dev, ce); - spin_unlock_irqrestore(&psd->lock, flags); + pm_clk_op_unlock(psd, &flags); return 0; } diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 9a14eedacb92..78c310d3179d 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -297,6 +297,18 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd, return state; } +static int genpd_xlate_performance_state(struct generic_pm_domain *genpd, + struct generic_pm_domain *parent, + unsigned int pstate) +{ + if (!parent->set_performance_state) + return pstate; + + return dev_pm_opp_xlate_performance_state(genpd->opp_table, + parent->opp_table, + pstate); +} + static int _genpd_set_performance_state(struct generic_pm_domain *genpd, unsigned int state, int depth) { @@ -311,13 +323,8 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd, list_for_each_entry(link, &genpd->child_links, child_node) { parent = link->parent; - if (!parent->set_performance_state) - continue; - /* Find parent's performance state */ - ret = dev_pm_opp_xlate_performance_state(genpd->opp_table, - parent->opp_table, - state); + ret = genpd_xlate_performance_state(genpd, parent, state); if (unlikely(ret < 0)) goto err; @@ -339,9 +346,11 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd, goto err; } - ret = genpd->set_performance_state(genpd, state); - if (ret) - goto err; + if (genpd->set_performance_state) { + ret = genpd->set_performance_state(genpd, state); + if (ret) + goto err; + } genpd->performance_state = state; return 0; @@ -352,9 +361,6 @@ err: child_node) { parent = link->parent; - if (!parent->set_performance_state) - continue; - genpd_lock_nested(parent, depth + 1); 
parent_state = link->prev_performance_state; @@ -399,9 +405,6 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state) if (!genpd) return -ENODEV; - if (unlikely(!genpd->set_performance_state)) - return -EINVAL; - if (WARN_ON(!dev->power.subsys_data || !dev->power.subsys_data->domain_data)) return -EINVAL; @@ -423,6 +426,35 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state) } EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state); +/** + * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup. + * + * @dev: Device to handle + * @next: impending interrupt/wakeup for the device + * + * + * Allow devices to inform of the next wakeup. It's assumed that the users + * guarantee that the genpd wouldn't be detached while this routine is getting + * called. Additionally, it's also assumed that @dev isn't runtime suspended + * (RPM_SUSPENDED)." + * Although devices are expected to update the next_wakeup after the end of + * their usecase as well, it is possible the devices themselves may not know + * about that, so stale @next will be ignored when powering off the domain. + */ +void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next) +{ + struct generic_pm_domain_data *gpd_data; + struct generic_pm_domain *genpd; + + genpd = dev_to_genpd_safe(dev); + if (!genpd) + return; + + gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); + gpd_data->next_wakeup = next; +} +EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup); + static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) { unsigned int state_idx = genpd->state_idx; @@ -934,8 +966,7 @@ static int genpd_runtime_resume(struct device *dev) err_stop: genpd_stop_dev(genpd, dev); err_poweroff: - if (!pm_runtime_is_irq_safe(dev) || - (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) { + if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) { genpd_lock(genpd); genpd_power_off(genpd, true, 0); genpd_unlock(genpd); @@ -1465,6 +1496,7 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev) gpd_data->td.constraint_changed = true; gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS; gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; + gpd_data->next_wakeup = KTIME_MAX; spin_lock_irq(&dev->power.lock); @@ -2164,6 +2196,7 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, cp->node = of_node_get(np); cp->data = data; cp->xlate = xlate; + fwnode_dev_initialized(&np->fwnode, true); mutex_lock(&of_genpd_mutex); list_add(&cp->link, &of_genpd_providers); @@ -2353,6 +2386,7 @@ void of_genpd_del_provider(struct device_node *np) } } + fwnode_dev_initialized(&cp->node->fwnode, false); list_del(&cp->link); of_node_put(cp->node); kfree(cp); @@ -2463,7 +2497,7 @@ int of_genpd_add_subdomain(struct of_phandle_args *parent_spec, out: mutex_unlock(&gpd_list_lock); - return ret; + return ret == -ENOENT ? 
-EPROBE_DEFER : ret; } EXPORT_SYMBOL_GPL(of_genpd_add_subdomain); @@ -2952,7 +2986,15 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev) else WARN_ON(1); - seq_puts(s, p); + seq_printf(s, "%-25s ", p); +} + +static void perf_status_str(struct seq_file *s, struct device *dev) +{ + struct generic_pm_domain_data *gpd_data; + + gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); + seq_put_decimal_ull(s, "", gpd_data->performance_state); } static int genpd_summary_one(struct seq_file *s, @@ -2980,7 +3022,7 @@ static int genpd_summary_one(struct seq_file *s, else snprintf(state, sizeof(state), "%s", status_lookup[genpd->status]); - seq_printf(s, "%-30s %-15s ", genpd->name, state); + seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state); /* * Modifications on the list require holding locks on both @@ -2988,6 +3030,8 @@ static int genpd_summary_one(struct seq_file *s, * Also genpd->name is immutable. */ list_for_each_entry(link, &genpd->parent_links, parent_node) { + if (list_is_first(&link->parent_node, &genpd->parent_links)) + seq_printf(s, "\n%48s", " "); seq_printf(s, "%s", link->child->name); if (!list_is_last(&link->parent_node, &genpd->parent_links)) seq_puts(s, ", "); @@ -3002,6 +3046,7 @@ static int genpd_summary_one(struct seq_file *s, seq_printf(s, "\n %-50s ", kobj_path); rtpm_status_str(s, pm_data->dev); + perf_status_str(s, pm_data->dev); kfree(kobj_path); } @@ -3017,9 +3062,9 @@ static int summary_show(struct seq_file *s, void *data) struct generic_pm_domain *genpd; int ret = 0; - seq_puts(s, "domain status children\n"); + seq_puts(s, "domain status children performance\n"); seq_puts(s, " /device runtime status\n"); - seq_puts(s, "----------------------------------------------------------------------\n"); + seq_puts(s, "----------------------------------------------------------------------------------------------\n"); ret = mutex_lock_interruptible(&gpd_list_lock); if (ret) diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 490ed7deb99a..c6c218758f0b 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -117,6 +117,55 @@ static bool default_suspend_ok(struct device *dev) return td->cached_suspend_ok; } +static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t now) +{ + ktime_t domain_wakeup = KTIME_MAX; + ktime_t next_wakeup; + struct pm_domain_data *pdd; + struct gpd_link *link; + + if (!(genpd->flags & GENPD_FLAG_MIN_RESIDENCY)) + return; + + /* + * Devices that have a predictable wakeup pattern, may specify + * their next wakeup. Let's find the next wakeup from all the + * devices attached to this domain and from all the sub-domains. + * It is possible that component's a next wakeup may have become + * stale when we read that here. We will ignore to ensure the domain + * is able to enter its optimal idle state. 
+ */ + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + next_wakeup = to_gpd_data(pdd)->next_wakeup; + if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now)) + if (ktime_before(next_wakeup, domain_wakeup)) + domain_wakeup = next_wakeup; + } + + list_for_each_entry(link, &genpd->parent_links, parent_node) { + next_wakeup = link->child->next_wakeup; + if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now)) + if (ktime_before(next_wakeup, domain_wakeup)) + domain_wakeup = next_wakeup; + } + + genpd->next_wakeup = domain_wakeup; +} + +static bool next_wakeup_allows_state(struct generic_pm_domain *genpd, + unsigned int state, ktime_t now) +{ + ktime_t domain_wakeup = genpd->next_wakeup; + s64 idle_time_ns, min_sleep_ns; + + min_sleep_ns = genpd->states[state].power_off_latency_ns + + genpd->states[state].residency_ns; + + idle_time_ns = ktime_to_ns(ktime_sub(domain_wakeup, now)); + + return idle_time_ns >= min_sleep_ns; +} + static bool __default_power_down_ok(struct dev_pm_domain *pd, unsigned int state) { @@ -201,16 +250,41 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd, } /** - * default_power_down_ok - Default generic PM domain power off governor routine. + * _default_power_down_ok - Default generic PM domain power off governor routine. * @pd: PM domain to check. * * This routine must be executed under the PM domain's lock. */ -static bool default_power_down_ok(struct dev_pm_domain *pd) +static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now) { struct generic_pm_domain *genpd = pd_to_genpd(pd); + int state_idx = genpd->state_count - 1; struct gpd_link *link; + /* + * Find the next wakeup from devices that can determine their own wakeup + * to find when the domain would wakeup and do it for every device down + * the hierarchy. It is not worth while to sleep if the state's residency + * cannot be met. + */ + update_domain_next_wakeup(genpd, now); + if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (genpd->next_wakeup != KTIME_MAX)) { + /* Let's find out the deepest domain idle state, the devices prefer */ + while (state_idx >= 0) { + if (next_wakeup_allows_state(genpd, state_idx, now)) { + genpd->max_off_time_changed = true; + break; + } + state_idx--; + } + + if (state_idx < 0) { + state_idx = 0; + genpd->cached_power_down_ok = false; + goto done; + } + } + if (!genpd->max_off_time_changed) { genpd->state_idx = genpd->cached_power_down_state_idx; return genpd->cached_power_down_ok; @@ -228,21 +302,30 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) genpd->max_off_time_ns = -1; genpd->max_off_time_changed = false; genpd->cached_power_down_ok = true; - genpd->state_idx = genpd->state_count - 1; - /* Find a state to power down to, starting from the deepest. */ - while (!__default_power_down_ok(pd, genpd->state_idx)) { - if (genpd->state_idx == 0) { + /* + * Find a state to power down to, starting from the state + * determined by the next wakeup. 
+ */ + while (!__default_power_down_ok(pd, state_idx)) { + if (state_idx == 0) { genpd->cached_power_down_ok = false; break; } - genpd->state_idx--; + state_idx--; } +done: + genpd->state_idx = state_idx; genpd->cached_power_down_state_idx = genpd->state_idx; return genpd->cached_power_down_ok; } +static bool default_power_down_ok(struct dev_pm_domain *pd) +{ + return _default_power_down_ok(pd, ktime_get()); +} + static bool always_on_power_down_ok(struct dev_pm_domain *domain) { return false; @@ -254,11 +337,12 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) struct generic_pm_domain *genpd = pd_to_genpd(pd); struct cpuidle_device *dev; ktime_t domain_wakeup, next_hrtimer; + ktime_t now = ktime_get(); s64 idle_duration_ns; int cpu, i; /* Validate dev PM QoS constraints. */ - if (!default_power_down_ok(pd)) + if (!_default_power_down_ok(pd, now)) return false; if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN)) @@ -280,7 +364,7 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) } /* The minimum idle duration is from now - until the next wakeup. */ - idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get())); + idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now)); if (idle_duration_ns <= 0) return false; diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 46793276598d..f893c3c5af07 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -16,6 +16,7 @@ */ #define pr_fmt(fmt) "PM: " fmt +#define dev_fmt pr_fmt #include <linux/device.h> #include <linux/export.h> @@ -449,8 +450,8 @@ static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info) static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, int error) { - pr_err("Device %s failed to %s%s: error %d\n", - dev_name(dev), pm_verb(state.event), info, error); + dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info, + error); } static void dpm_show_time(ktime_t starttime, pm_message_t state, int error, @@ -1897,8 +1898,8 @@ int dpm_prepare(pm_message_t state) error = 0; continue; } - pr_info("Device %s not prepared for power transition: code %d\n", - dev_name(dev), error); + dev_info(dev, "not prepared for power transition: code %d\n", + error); put_device(dev); break; } diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index bfda153b1a41..a46a7e30881b 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1100,7 +1100,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume); * suspending the device when both its runtime PM status is %RPM_ACTIVE and its * runtime PM usage counter is not zero. * - * The caller is resposible for decrementing the runtime PM usage counter of + * The caller is responsible for decrementing the runtime PM usage counter of * @dev after this function has returned a positive value for it. */ int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count) diff --git a/drivers/base/property.c b/drivers/base/property.c index 35b95c6ac0c6..1421e9548857 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -837,9 +837,15 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put); /** * fwnode_device_is_available - check if a device is available for use * @fwnode: Pointer to the fwnode of the device. + * + * For fwnode node types that don't implement the .device_is_available() + * operation, this function returns true. 
*/ bool fwnode_device_is_available(const struct fwnode_handle *fwnode) { + if (!fwnode_has_op(fwnode, device_is_available)) + return true; + return fwnode_call_bool_op(fwnode, device_is_available); } EXPORT_SYMBOL_GPL(fwnode_device_is_available); @@ -1209,7 +1215,14 @@ fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode, best_ep_id = fwnode_ep.id; } - return best_ep; + if (best_ep) + return best_ep; + + if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) + return fwnode_graph_get_endpoint_by_id(fwnode->secondary, port, + endpoint, flags); + + return NULL; } EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id); diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index 7f4b3b62492c..f2469d3435ca 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -68,7 +68,7 @@ static int regcache_hw_init(struct regmap *map) map->cache_bypass = cache_bypass; if (ret == 0) { map->reg_defaults_raw = tmp_buf; - map->cache_free = 1; + map->cache_free = true; } else { kfree(tmp_buf); } diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c index 8ce30650b97c..fe3ac26b66ad 100644 --- a/drivers/base/regmap/regmap-sdw-mbq.c +++ b/drivers/base/regmap/regmap-sdw-mbq.c @@ -15,11 +15,11 @@ static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int va struct sdw_slave *slave = dev_to_sdw_dev(dev); int ret; - ret = sdw_write(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff); + ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff); if (ret < 0) return ret; - return sdw_write(slave, reg, val & 0xff); + return sdw_write_no_pm(slave, reg, val & 0xff); } static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val) @@ -29,11 +29,11 @@ static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *va int read0; int read1; - read0 = sdw_read(slave, reg); + read0 = sdw_read_no_pm(slave, reg); if (read0 < 0) return read0; - read1 = sdw_read(slave, SDW_SDCA_MBQ_CTL(reg)); + read1 = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg)); if (read1 < 0) return read1; @@ -98,4 +98,4 @@ struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw, EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq); MODULE_DESCRIPTION("Regmap SoundWire MBQ Module"); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c index c83be26434e7..966de8a136d9 100644 --- a/drivers/base/regmap/regmap-sdw.c +++ b/drivers/base/regmap/regmap-sdw.c @@ -13,7 +13,7 @@ static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val) struct device *dev = context; struct sdw_slave *slave = dev_to_sdw_dev(dev); - return sdw_write(slave, reg, val); + return sdw_write_no_pm(slave, reg, val); } static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val) @@ -22,7 +22,7 @@ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val) struct sdw_slave *slave = dev_to_sdw_dev(dev); int read; - read = sdw_read(slave, reg); + read = sdw_read_no_pm(slave, reg); if (read < 0) return read; diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index 4a4b2008fbc2..37179a8b1ceb 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -24,6 +24,7 @@ struct swnode { struct swnode *parent; unsigned int allocated:1; + unsigned int managed:1; }; static DEFINE_IDA(swnode_root_ids); @@ -48,6 +49,19 @@ EXPORT_SYMBOL_GPL(is_software_node); struct swnode, fwnode) : NULL; \ }) +static inline struct 
swnode *dev_to_swnode(struct device *dev) +{ + struct fwnode_handle *fwnode = dev_fwnode(dev); + + if (!fwnode) + return NULL; + + if (!is_software_node(fwnode)) + fwnode = fwnode->secondary; + + return to_swnode(fwnode); +} + static struct swnode * software_node_to_swnode(const struct software_node *node) { @@ -443,14 +457,18 @@ software_node_get_next_child(const struct fwnode_handle *fwnode, struct swnode *c = to_swnode(child); if (!p || list_empty(&p->children) || - (c && list_is_last(&c->entry, &p->children))) + (c && list_is_last(&c->entry, &p->children))) { + fwnode_handle_put(child); return NULL; + } if (c) c = list_next_entry(c, entry); else c = list_first_entry(&p->children, struct swnode, entry); - return &c->fwnode; + + fwnode_handle_put(child); + return fwnode_handle_get(&c->fwnode); } static struct fwnode_handle * @@ -536,6 +554,115 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode, return 0; } +static struct fwnode_handle * +swnode_graph_find_next_port(const struct fwnode_handle *parent, + struct fwnode_handle *port) +{ + struct fwnode_handle *old = port; + + while ((port = software_node_get_next_child(parent, old))) { + /* + * fwnode ports have naming style "port@", so we search for any + * children that follow that convention. + */ + if (!strncmp(to_swnode(port)->node->name, "port@", + strlen("port@"))) + return port; + old = port; + } + + return NULL; +} + +static struct fwnode_handle * +software_node_graph_get_next_endpoint(const struct fwnode_handle *fwnode, + struct fwnode_handle *endpoint) +{ + struct swnode *swnode = to_swnode(fwnode); + struct fwnode_handle *parent; + struct fwnode_handle *port; + + if (!swnode) + return NULL; + + if (endpoint) { + port = software_node_get_parent(endpoint); + parent = software_node_get_parent(port); + } else { + parent = software_node_get_named_child_node(fwnode, "ports"); + if (!parent) + parent = software_node_get(&swnode->fwnode); + + port = swnode_graph_find_next_port(parent, NULL); + } + + for (; port; port = swnode_graph_find_next_port(parent, port)) { + endpoint = software_node_get_next_child(port, endpoint); + if (endpoint) { + fwnode_handle_put(port); + break; + } + } + + fwnode_handle_put(parent); + + return endpoint; +} + +static struct fwnode_handle * +software_node_graph_get_remote_endpoint(const struct fwnode_handle *fwnode) +{ + struct swnode *swnode = to_swnode(fwnode); + const struct software_node_ref_args *ref; + const struct property_entry *prop; + + if (!swnode) + return NULL; + + prop = property_entry_get(swnode->node->properties, "remote-endpoint"); + if (!prop || prop->type != DEV_PROP_REF || prop->is_inline) + return NULL; + + ref = prop->pointer; + + return software_node_get(software_node_fwnode(ref[0].node)); +} + +static struct fwnode_handle * +software_node_graph_get_port_parent(struct fwnode_handle *fwnode) +{ + struct swnode *swnode = to_swnode(fwnode); + + swnode = swnode->parent; + if (swnode && !strcmp(swnode->node->name, "ports")) + swnode = swnode->parent; + + return swnode ? 
+
+static int
+software_node_graph_parse_endpoint(const struct fwnode_handle *fwnode,
+                                   struct fwnode_endpoint *endpoint)
+{
+        struct swnode *swnode = to_swnode(fwnode);
+        const char *parent_name = swnode->parent->node->name;
+        int ret;
+
+        if (strlen("port@") >= strlen(parent_name) ||
+            strncmp(parent_name, "port@", strlen("port@")))
+                return -EINVAL;
+
+        /* Ports have naming style "port@n", so we need to extract the n */
+        ret = kstrtou32(parent_name + strlen("port@"), 10, &endpoint->port);
+        if (ret)
+                return ret;
+
+        endpoint->id = swnode->id;
+        endpoint->local_fwnode = fwnode;
+
+        return 0;
+}
+
 static const struct fwnode_operations software_node_ops = {
         .get = software_node_get,
         .put = software_node_put,
@@ -547,7 +674,11 @@ static const struct fwnode_operations software_node_ops = {
         .get_parent = software_node_get_parent,
         .get_next_child_node = software_node_get_next_child,
         .get_named_child_node = software_node_get_named_child_node,
-        .get_reference_args = software_node_get_reference_args
+        .get_reference_args = software_node_get_reference_args,
+        .graph_get_next_endpoint = software_node_graph_get_next_endpoint,
+        .graph_get_remote_endpoint = software_node_graph_get_remote_endpoint,
+        .graph_get_port_parent = software_node_graph_get_port_parent,
+        .graph_parse_endpoint = software_node_graph_parse_endpoint,
 };
 
 /* -------------------------------------------------------------------------- */
@@ -688,7 +819,11 @@ out_err:
  * software_node_register_nodes - Register an array of software nodes
  * @nodes: Zero terminated array of software nodes to be registered
  *
- * Register multiple software nodes at once.
+ * Register multiple software nodes at once. If any node in the array
+ * has its .parent pointer set (which can only be to another software_node),
+ * then its parent **must** have been registered before it, either outside
+ * of this function or by ordering the array such that the parent comes
+ * before the child.
  */
 int software_node_register_nodes(const struct software_node *nodes)
 {
@@ -696,14 +831,23 @@ int software_node_register_nodes(const struct software_node *nodes)
         int i;
 
         for (i = 0; nodes[i].name; i++) {
-                ret = software_node_register(&nodes[i]);
-                if (ret) {
-                        software_node_unregister_nodes(nodes);
-                        return ret;
+                const struct software_node *parent = nodes[i].parent;
+
+                if (parent && !software_node_to_swnode(parent)) {
+                        ret = -EINVAL;
+                        goto err_unregister_nodes;
                 }
+
+                ret = software_node_register(&nodes[i]);
+                if (ret)
+                        goto err_unregister_nodes;
         }
 
         return 0;
+
+err_unregister_nodes:
+        software_node_unregister_nodes(nodes);
+        return ret;
 }
 EXPORT_SYMBOL_GPL(software_node_register_nodes);
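Because software_node_register_nodes() now rejects a child whose parent has not been registered yet, arrays that chain nodes through .parent must list parents first. A small sketch with hypothetical node names; the child's .parent points into the same array, so the entry registered first is exactly the object the lookup checks:

static const struct software_node example_nodes[] = {
        { .name = "example-parent" },
        {
                .name = "example-child",
                /* Entry 0 above is registered first, so the check passes. */
                .parent = &example_nodes[0],
        },
        { }        /* zero-terminated, as the kernel-doc requires */
};

static int __init example_init(void)
{
        /* On a mid-array failure, the already registered entries unwind. */
        return software_node_register_nodes(example_nodes);
}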
@@ -711,18 +855,23 @@ EXPORT_SYMBOL_GPL(software_node_register_nodes);
  * software_node_unregister_nodes - Unregister an array of software nodes
  * @nodes: Zero terminated array of software nodes to be unregistered
  *
- * Unregister multiple software nodes at once.
+ * Unregister multiple software nodes at once. If parent pointers are set up
+ * in any of the software nodes then the array **must** be ordered such that
+ * parents come before their children.
  *
- * NOTE: Be careful using this call if the nodes had parent pointers set up in
- * them before registering. If so, it is wiser to remove the nodes
- * individually, in the correct order (child before parent) instead of relying
- * on the sequential order of the list of nodes in the array.
+ * NOTE: If you are uncertain whether the array is ordered such that
+ * children will be unregistered before their parents, it is wiser to
+ * remove the nodes individually, in the correct order (child before
+ * parent).
  */
 void software_node_unregister_nodes(const struct software_node *nodes)
 {
-        int i;
+        unsigned int i = 0;
+
+        while (nodes[i].name)
+                i++;
 
-        for (i = 0; nodes[i].name; i++)
+        while (i--)
                 software_node_unregister(&nodes[i]);
 }
 EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
@@ -757,16 +906,23 @@ EXPORT_SYMBOL_GPL(software_node_register_node_group);
  * software_node_unregister_node_group - Unregister a group of software nodes
  * @node_group: NULL terminated array of software node pointers to be unregistered
  *
- * Unregister multiple software nodes at once.
+ * Unregister multiple software nodes at once. The array is unwound in
+ * reverse order (last entry first), so any member that is a child of
+ * another member must appear later in the array, ensuring that children
+ * are unregistered before their parents.
  */
-void software_node_unregister_node_group(const struct software_node **node_group)
+void software_node_unregister_node_group(
+                const struct software_node **node_group)
 {
-        unsigned int i;
+        unsigned int i = 0;
 
         if (!node_group)
                 return;
 
-        for (i = 0; node_group[i]; i++)
+        while (node_group[i])
+                i++;
+
+        while (i--)
                 software_node_unregister(node_group[i]);
 }
 EXPORT_SYMBOL_GPL(software_node_unregister_node_group);
@@ -843,22 +999,99 @@ void fwnode_remove_software_node(struct fwnode_handle *fwnode)
 }
 EXPORT_SYMBOL_GPL(fwnode_remove_software_node);
 
+/**
+ * device_add_software_node - Assign software node to a device
+ * @dev: The device the software node is meant for.
+ * @swnode: The software node.
+ *
+ * This function will register @swnode and make it the secondary firmware node
+ * pointer of @dev. If @dev has no primary node, then @swnode will become the
+ * primary node.
+ */
+int device_add_software_node(struct device *dev, const struct software_node *swnode)
+{
+        int ret;
+
+        /* Only one software node per device. */
+        if (dev_to_swnode(dev))
+                return -EBUSY;
+
+        ret = software_node_register(swnode);
+        if (ret)
+                return ret;
+
+        set_secondary_fwnode(dev, software_node_fwnode(swnode));
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(device_add_software_node);
+
+/**
+ * device_remove_software_node - Remove device's software node
+ * @dev: The device with the software node.
+ *
+ * This function will unregister the software node of @dev.
+ */
+void device_remove_software_node(struct device *dev)
+{
+        struct swnode *swnode;
+
+        swnode = dev_to_swnode(dev);
+        if (!swnode)
+                return;
+
+        software_node_notify(dev, KOBJ_REMOVE);
+        set_secondary_fwnode(dev, NULL);
+        kobject_put(&swnode->kobj);
+}
+EXPORT_SYMBOL_GPL(device_remove_software_node);
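device_add_software_node() pairs naturally with device_remove_software_node() across a driver's probe and remove paths. A hedged sketch of that pattern for a hypothetical platform driver; the names and the property are illustrative, not part of the patch:

static const struct property_entry example_props[] = {
        PROPERTY_ENTRY_U32("example,clock-frequency", 19200000),
        { }
};

static const struct software_node example_swnode = {
        .properties = example_props,
};

static int example_probe(struct platform_device *pdev)
{
        /* Registers the node and makes it the device's secondary fwnode. */
        return device_add_software_node(&pdev->dev, &example_swnode);
}

static int example_remove(struct platform_device *pdev)
{
        device_remove_software_node(&pdev->dev);
        return 0;
}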
+/**
+ * device_create_managed_software_node - Create a software node for a device
+ * @dev: The device the software node is assigned to.
+ * @properties: Device properties for the software node.
+ * @parent: Parent of the software node.
+ *
+ * Creates a software node as a managed resource for @dev, which means the
+ * lifetime of the newly created software node is tied to the lifetime of @dev.
+ * Software nodes created with this function should not be reused or shared
+ * because of that. The function takes a deep copy of @properties for the
+ * software node.
+ *
+ * Since the new software node is assigned directly to @dev, and since it
+ * should not be shared, it is not returned to the caller. The function
+ * returns 0 on success, and a negative errno in case of an error.
+ */
+int device_create_managed_software_node(struct device *dev,
+                                        const struct property_entry *properties,
+                                        const struct software_node *parent)
+{
+        struct fwnode_handle *p = software_node_fwnode(parent);
+        struct fwnode_handle *fwnode;
+
+        if (parent && !p)
+                return -EINVAL;
+
+        fwnode = fwnode_create_software_node(properties, p);
+        if (IS_ERR(fwnode))
+                return PTR_ERR(fwnode);
+
+        to_swnode(fwnode)->managed = true;
+        set_secondary_fwnode(dev, fwnode);
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(device_create_managed_software_node);
+
 int software_node_notify(struct device *dev, unsigned long action)
 {
-        struct fwnode_handle *fwnode = dev_fwnode(dev);
         struct swnode *swnode;
         int ret;
 
-        if (!fwnode)
-                return 0;
-
-        if (!is_software_node(fwnode))
-                fwnode = fwnode->secondary;
-        if (!is_software_node(fwnode))
+        swnode = dev_to_swnode(dev);
+        if (!swnode)
                 return 0;
 
-        swnode = to_swnode(fwnode);
-
         switch (action) {
         case KOBJ_ADD:
                 ret = sysfs_create_link(&dev->kobj, &swnode->kobj,
@@ -878,6 +1111,11 @@ int software_node_notify(struct device *dev, unsigned long action)
                 sysfs_remove_link(&swnode->kobj, dev_name(dev));
                 sysfs_remove_link(&dev->kobj, "software_node");
                 kobject_put(&swnode->kobj);
+
+                if (swnode->managed) {
+                        set_secondary_fwnode(dev, NULL);
+                        kobject_put(&swnode->kobj);
+                }
                 break;
         default:
                 break;
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
index 3ca56367c84b..2f15fae8625f 100644
--- a/drivers/base/test/Makefile
+++ b/drivers/base/test/Makefile
@@ -2,3 +2,4 @@
 obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE)        += test_async_driver_probe.o
 
 obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o
+CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all
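For one-off property sets, device_create_managed_software_node() removes the need for a matching cleanup call: the managed flag makes software_node_notify(KOBJ_REMOVE) tear the node down when the device goes away. A minimal sketch under the same caveats as the earlier examples (the caller and property names are illustrative):

static int example_attach_properties(struct device *dev)
{
        const struct property_entry props[] = {
                PROPERTY_ENTRY_STRING("label", "example-label"),
                { }
        };

        /*
         * The properties are deep-copied, so a stack-local array is fine;
         * no unwind call is needed, the node's lifetime follows the device.
         */
        return device_create_managed_software_node(dev, props, NULL);
}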