Diffstat (limited to 'drivers/base')
 drivers/base/arch_topology.c         | 100
 drivers/base/base.h                  |   1
 drivers/base/cacheinfo.c             | 145
 drivers/base/core.c                  | 123
 drivers/base/cpu.c                   |   8
 drivers/base/dd.c                    |  59
 drivers/base/devtmpfs.c              |   1
 drivers/base/firmware_loader/main.c  |   4
 drivers/base/firmware_loader/sysfs.c |  10
 drivers/base/node.c                  |   4
 drivers/base/power/domain.c          |   5
 drivers/base/power/runtime.c         |   6
 drivers/base/power/wakeup.c          |  30
 drivers/base/regmap/regcache.c       |  11
 drivers/base/regmap/regmap-irq.c     | 432
 drivers/base/regmap/regmap.c         |  27
 drivers/base/topology.c              |  32
17 files changed, 682 insertions, 316 deletions
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 579c851a2bd7..0424b59b695e 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -7,6 +7,7 @@ */ #include <linux/acpi.h> +#include <linux/cacheinfo.h> #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/device.h> @@ -496,7 +497,7 @@ static int __init get_cpu_for_node(struct device_node *node) } static int __init parse_core(struct device_node *core, int package_id, - int core_id) + int cluster_id, int core_id) { char name[20]; bool leaf = true; @@ -512,6 +513,7 @@ static int __init parse_core(struct device_node *core, int package_id, cpu = get_cpu_for_node(t); if (cpu >= 0) { cpu_topology[cpu].package_id = package_id; + cpu_topology[cpu].cluster_id = cluster_id; cpu_topology[cpu].core_id = core_id; cpu_topology[cpu].thread_id = i; } else if (cpu != -ENODEV) { @@ -533,6 +535,7 @@ static int __init parse_core(struct device_node *core, int package_id, } cpu_topology[cpu].package_id = package_id; + cpu_topology[cpu].cluster_id = cluster_id; cpu_topology[cpu].core_id = core_id; } else if (leaf && cpu != -ENODEV) { pr_err("%pOF: Can't get CPU for leaf core\n", core); @@ -542,13 +545,13 @@ static int __init parse_core(struct device_node *core, int package_id, return 0; } -static int __init parse_cluster(struct device_node *cluster, int depth) +static int __init parse_cluster(struct device_node *cluster, int package_id, + int cluster_id, int depth) { char name[20]; bool leaf = true; bool has_cores = false; struct device_node *c; - static int package_id __initdata; int core_id = 0; int i, ret; @@ -563,7 +566,9 @@ static int __init parse_cluster(struct device_node *cluster, int depth) c = of_get_child_by_name(cluster, name); if (c) { leaf = false; - ret = parse_cluster(c, depth + 1); + ret = parse_cluster(c, package_id, i, depth + 1); + if (depth > 0) + pr_warn("Topology for clusters of clusters not yet supported\n"); of_node_put(c); if (ret != 0) return ret; @@ -587,7 +592,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth) } if (leaf) { - ret = parse_core(c, package_id, core_id++); + ret = parse_core(c, package_id, cluster_id, + core_id++); } else { pr_err("%pOF: Non-leaf cluster with core %s\n", cluster, name); @@ -604,10 +610,33 @@ static int __init parse_cluster(struct device_node *cluster, int depth) if (leaf && !has_cores) pr_warn("%pOF: empty cluster\n", cluster); - if (leaf) + return 0; +} + +static int __init parse_socket(struct device_node *socket) +{ + char name[20]; + struct device_node *c; + bool has_socket = false; + int package_id = 0, ret; + + do { + snprintf(name, sizeof(name), "socket%d", package_id); + c = of_get_child_by_name(socket, name); + if (c) { + has_socket = true; + ret = parse_cluster(c, package_id, -1, 0); + of_node_put(c); + if (ret != 0) + return ret; + } package_id++; + } while (c); - return 0; + if (!has_socket) + ret = parse_cluster(socket, 0, -1, 0); + + return ret; } static int __init parse_dt_topology(void) @@ -630,7 +659,7 @@ static int __init parse_dt_topology(void) if (!map) goto out; - ret = parse_cluster(map, 0); + ret = parse_socket(map); if (ret != 0) goto out_map; @@ -641,8 +670,10 @@ static int __init parse_dt_topology(void) * only mark cores described in the DT as possible. 
*/ for_each_possible_cpu(cpu) - if (cpu_topology[cpu].package_id == -1) + if (cpu_topology[cpu].package_id < 0) { ret = -EINVAL; + break; + } out_map: of_node_put(map); @@ -667,7 +698,8 @@ const struct cpumask *cpu_coregroup_mask(int cpu) /* not numa in package, lets use the package siblings */ core_mask = &cpu_topology[cpu].core_sibling; } - if (cpu_topology[cpu].llc_id != -1) { + + if (last_level_cache_is_valid(cpu)) { if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask)) core_mask = &cpu_topology[cpu].llc_sibling; } @@ -686,19 +718,31 @@ const struct cpumask *cpu_coregroup_mask(int cpu) const struct cpumask *cpu_clustergroup_mask(int cpu) { + /* + * Forbid cpu_clustergroup_mask() to span more or the same CPUs as + * cpu_coregroup_mask(). + */ + if (cpumask_subset(cpu_coregroup_mask(cpu), + &cpu_topology[cpu].cluster_sibling)) + return get_cpu_mask(cpu); + return &cpu_topology[cpu].cluster_sibling; } void update_siblings_masks(unsigned int cpuid) { struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; - int cpu; + int cpu, ret; + + ret = detect_cache_attributes(cpuid); + if (ret) + pr_info("Early cacheinfo failed, ret = %d\n", ret); /* update core and thread sibling masks */ for_each_online_cpu(cpu) { cpu_topo = &cpu_topology[cpu]; - if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) { + if (last_level_cache_is_shared(cpu, cpuid)) { cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling); } @@ -706,15 +750,17 @@ void update_siblings_masks(unsigned int cpuid) if (cpuid_topo->package_id != cpu_topo->package_id) continue; - if (cpuid_topo->cluster_id == cpu_topo->cluster_id && - cpuid_topo->cluster_id != -1) { + cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); + cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); + + if (cpuid_topo->cluster_id != cpu_topo->cluster_id) + continue; + + if (cpuid_topo->cluster_id >= 0) { cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling); cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling); } - cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); - cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); - if (cpuid_topo->core_id != cpu_topo->core_id) continue; @@ -750,7 +796,6 @@ void __init reset_cpu_topology(void) cpu_topo->core_id = -1; cpu_topo->cluster_id = -1; cpu_topo->package_id = -1; - cpu_topo->llc_id = -1; clear_cpu_topology(cpu); } @@ -780,15 +825,20 @@ __weak int __init parse_acpi_topology(void) #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) void __init init_cpu_topology(void) { + int ret; + reset_cpu_topology(); + ret = parse_acpi_topology(); + if (!ret) + ret = of_have_populated_dt() && parse_dt_topology(); - /* - * Discard anything that was parsed if we hit an error so we - * don't use partial information. - */ - if (parse_acpi_topology()) - reset_cpu_topology(); - else if (of_have_populated_dt() && parse_dt_topology()) + if (ret) { + /* + * Discard anything that was parsed if we hit an error so we + * don't use partial information. 
+ */ reset_cpu_topology(); + return; + } } #endif diff --git a/drivers/base/base.h b/drivers/base/base.h index ab71403d102f..b3a43a164dcd 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -160,6 +160,7 @@ extern int devres_release_all(struct device *dev); extern void device_block_probing(void); extern void device_unblock_probing(void); extern void deferred_probe_extend_timeout(void); +extern void driver_deferred_probe_trigger(void); /* /sys/devices directory */ extern struct kset *devices_kset; diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index dad296229161..4b5cd08c5a65 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -14,7 +14,7 @@ #include <linux/cpu.h> #include <linux/device.h> #include <linux/init.h> -#include <linux/of.h> +#include <linux/of_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/smp.h> @@ -25,19 +25,60 @@ static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo); #define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu)) #define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves) #define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list) +#define per_cpu_cacheinfo_idx(cpu, idx) \ + (per_cpu_cacheinfo(cpu) + (idx)) struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu) { return ci_cacheinfo(cpu); } -#ifdef CONFIG_OF static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf, struct cacheinfo *sib_leaf) { + /* + * For non DT/ACPI systems, assume unique level 1 caches, + * system-wide shared caches for all other levels. This will be used + * only if arch specific code has not populated shared_cpu_map + */ + if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI))) + return !(this_leaf->level == 1); + + if ((sib_leaf->attributes & CACHE_ID) && + (this_leaf->attributes & CACHE_ID)) + return sib_leaf->id == this_leaf->id; + return sib_leaf->fw_token == this_leaf->fw_token; } +bool last_level_cache_is_valid(unsigned int cpu) +{ + struct cacheinfo *llc; + + if (!cache_leaves(cpu)) + return false; + + llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1); + + return (llc->attributes & CACHE_ID) || !!llc->fw_token; + +} + +bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y) +{ + struct cacheinfo *llc_x, *llc_y; + + if (!last_level_cache_is_valid(cpu_x) || + !last_level_cache_is_valid(cpu_y)) + return false; + + llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1); + llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1); + + return cache_leaves_are_shared(llc_x, llc_y); +} + +#ifdef CONFIG_OF /* OF properties to query for a given cache type */ struct cache_type_info { const char *size_prop; @@ -157,27 +198,16 @@ static int cache_setup_of_node(unsigned int cpu) { struct device_node *np; struct cacheinfo *this_leaf; - struct device *cpu_dev = get_cpu_device(cpu); - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); unsigned int index = 0; - /* skip if fw_token is already populated */ - if (this_cpu_ci->info_list->fw_token) { - return 0; - } - - if (!cpu_dev) { - pr_err("No cpu device for CPU %d\n", cpu); - return -ENODEV; - } - np = cpu_dev->of_node; + np = of_cpu_device_node_get(cpu); if (!np) { pr_err("Failed to find cpu%d device node\n", cpu); return -ENOENT; } while (index < cache_leaves(cpu)) { - this_leaf = this_cpu_ci->info_list + index; + this_leaf = per_cpu_cacheinfo_idx(cpu, index); if (this_leaf->level != 1) np = of_find_next_cache_node(np); else @@ -196,16 +226,6 @@ static int cache_setup_of_node(unsigned int cpu) } #else 
static inline int cache_setup_of_node(unsigned int cpu) { return 0; } -static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf, - struct cacheinfo *sib_leaf) -{ - /* - * For non-DT/ACPI systems, assume unique level 1 caches, system-wide - * shared caches for all other levels. This will be used only if - * arch specific code has not populated shared_cpu_map - */ - return !(this_leaf->level == 1); -} #endif int __weak cache_setup_acpi(unsigned int cpu) @@ -215,6 +235,18 @@ int __weak cache_setup_acpi(unsigned int cpu) unsigned int coherency_max_size; +static int cache_setup_properties(unsigned int cpu) +{ + int ret = 0; + + if (of_have_populated_dt()) + ret = cache_setup_of_node(cpu); + else if (!acpi_disabled) + ret = cache_setup_acpi(cpu); + + return ret; +} + static int cache_shared_cpu_map_setup(unsigned int cpu) { struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); @@ -225,21 +257,21 @@ static int cache_shared_cpu_map_setup(unsigned int cpu) if (this_cpu_ci->cpu_map_populated) return 0; - if (of_have_populated_dt()) - ret = cache_setup_of_node(cpu); - else if (!acpi_disabled) - ret = cache_setup_acpi(cpu); - - if (ret) - return ret; + /* + * skip setting up cache properties if LLC is valid, just need + * to update the shared cpu_map if the cache attributes were + * populated early before all the cpus are brought online + */ + if (!last_level_cache_is_valid(cpu)) { + ret = cache_setup_properties(cpu); + if (ret) + return ret; + } for (index = 0; index < cache_leaves(cpu); index++) { unsigned int i; - this_leaf = this_cpu_ci->info_list + index; - /* skip if shared_cpu_map is already populated */ - if (!cpumask_empty(&this_leaf->shared_cpu_map)) - continue; + this_leaf = per_cpu_cacheinfo_idx(cpu, index); cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); for_each_online_cpu(i) { @@ -247,7 +279,8 @@ static int cache_shared_cpu_map_setup(unsigned int cpu) if (i == cpu || !sib_cpu_ci->info_list) continue;/* skip if itself or no cacheinfo */ - sib_leaf = sib_cpu_ci->info_list + index; + + sib_leaf = per_cpu_cacheinfo_idx(i, index); if (cache_leaves_are_shared(this_leaf, sib_leaf)) { cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); cpumask_set_cpu(i, &this_leaf->shared_cpu_map); @@ -263,23 +296,19 @@ static int cache_shared_cpu_map_setup(unsigned int cpu) static void cache_shared_cpu_map_remove(unsigned int cpu) { - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); struct cacheinfo *this_leaf, *sib_leaf; unsigned int sibling, index; for (index = 0; index < cache_leaves(cpu); index++) { - this_leaf = this_cpu_ci->info_list + index; + this_leaf = per_cpu_cacheinfo_idx(cpu, index); for_each_cpu(sibling, &this_leaf->shared_cpu_map) { - struct cpu_cacheinfo *sib_cpu_ci; - - if (sibling == cpu) /* skip itself */ - continue; + struct cpu_cacheinfo *sib_cpu_ci = + get_cpu_cacheinfo(sibling); - sib_cpu_ci = get_cpu_cacheinfo(sibling); - if (!sib_cpu_ci->info_list) - continue; + if (sibling == cpu || !sib_cpu_ci->info_list) + continue;/* skip if itself or no cacheinfo */ - sib_leaf = sib_cpu_ci->info_list + index; + sib_leaf = per_cpu_cacheinfo_idx(sibling, index); cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); } @@ -310,17 +339,28 @@ int __weak populate_cache_leaves(unsigned int cpu) return -ENOENT; } -static int detect_cache_attributes(unsigned int cpu) +int detect_cache_attributes(unsigned int cpu) { int ret; + /* Since early detection of the cacheinfo is allowed via this + * function and this also gets called 
as CPU hotplug callbacks via + * cacheinfo_cpu_online, the initialisation can be skipped and only + * CPU maps can be updated as the CPU online status would be update + * if called via cacheinfo_cpu_online path. + */ + if (per_cpu_cacheinfo(cpu)) + goto update_cpu_map; + if (init_cache_level(cpu) || !cache_leaves(cpu)) return -ENOENT; per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), - sizeof(struct cacheinfo), GFP_KERNEL); - if (per_cpu_cacheinfo(cpu) == NULL) + sizeof(struct cacheinfo), GFP_ATOMIC); + if (per_cpu_cacheinfo(cpu) == NULL) { + cache_leaves(cpu) = 0; return -ENOMEM; + } /* * populate_cache_leaves() may completely setup the cache leaves and @@ -329,6 +369,8 @@ static int detect_cache_attributes(unsigned int cpu) ret = populate_cache_leaves(cpu); if (ret) goto free_ci; + +update_cpu_map: /* * For systems using DT for cache hierarchy, fw_token * and shared_cpu_map will be set up here only if they are @@ -614,7 +656,6 @@ static int cache_add_dev(unsigned int cpu) int rc; struct device *ci_dev, *parent; struct cacheinfo *this_leaf; - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); const struct attribute_group **cache_groups; rc = cpu_cache_sysfs_init(cpu); @@ -623,7 +664,7 @@ static int cache_add_dev(unsigned int cpu) parent = per_cpu_cache_dev(cpu); for (i = 0; i < cache_leaves(cpu); i++) { - this_leaf = this_cpu_ci->info_list + i; + this_leaf = per_cpu_cacheinfo_idx(cpu, i); if (this_leaf->disable_sysfs) continue; if (this_leaf->type == CACHE_TYPE_NOCACHE) diff --git a/drivers/base/core.c b/drivers/base/core.c index 460d6f163e41..753e7cca0f40 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -54,6 +54,7 @@ static unsigned int defer_sync_state_count = 1; static DEFINE_MUTEX(fwnode_link_lock); static bool fw_devlink_is_permissive(void); static bool fw_devlink_drv_reg_done; +static bool fw_devlink_best_effort; /** * fwnode_link_add - Create a link between two fwnode_handles. @@ -976,6 +977,12 @@ static void device_links_missing_supplier(struct device *dev) } } +static bool dev_is_best_effort(struct device *dev) +{ + return (fw_devlink_best_effort && dev->can_match) || + (dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT)); +} + /** * device_links_check_suppliers - Check presence of supplier drivers. * @dev: Consumer device. 
@@ -995,7 +1002,7 @@ static void device_links_missing_supplier(struct device *dev) int device_links_check_suppliers(struct device *dev) { struct device_link *link; - int ret = 0; + int ret = 0, fwnode_ret = 0; struct fwnode_handle *sup_fw; /* @@ -1008,12 +1015,17 @@ int device_links_check_suppliers(struct device *dev) sup_fw = list_first_entry(&dev->fwnode->suppliers, struct fwnode_link, c_hook)->supplier; - dev_err_probe(dev, -EPROBE_DEFER, "wait for supplier %pfwP\n", - sup_fw); - mutex_unlock(&fwnode_link_lock); - return -EPROBE_DEFER; + if (!dev_is_best_effort(dev)) { + fwnode_ret = -EPROBE_DEFER; + dev_err_probe(dev, -EPROBE_DEFER, + "wait for supplier %pfwP\n", sup_fw); + } else { + fwnode_ret = -EAGAIN; + } } mutex_unlock(&fwnode_link_lock); + if (fwnode_ret == -EPROBE_DEFER) + return fwnode_ret; device_links_write_lock(); @@ -1023,6 +1035,14 @@ int device_links_check_suppliers(struct device *dev) if (link->status != DL_STATE_AVAILABLE && !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) { + + if (dev_is_best_effort(dev) && + link->flags & DL_FLAG_INFERRED && + !link->supplier->can_match) { + ret = -EAGAIN; + continue; + } + device_links_missing_supplier(dev); dev_err_probe(dev, -EPROBE_DEFER, "supplier %s not ready\n", @@ -1035,7 +1055,8 @@ int device_links_check_suppliers(struct device *dev) dev->links.status = DL_DEV_PROBING; device_links_write_unlock(); - return ret; + + return ret ? ret : fwnode_ret; } /** @@ -1300,6 +1321,18 @@ void device_links_driver_bound(struct device *dev) * save to drop the managed link completely. */ device_link_drop_managed(link); + } else if (dev_is_best_effort(dev) && + link->flags & DL_FLAG_INFERRED && + link->status != DL_STATE_CONSUMER_PROBE && + !link->supplier->can_match) { + /* + * When dev_is_best_effort() is true, we ignore device + * links to suppliers that don't have a driver. If the + * consumer device still managed to probe, there's no + * point in maintaining a device link in a weird state + * (consumer probed before supplier). So delete it. + */ + device_link_drop_managed(link); } else { WARN_ON(link->status != DL_STATE_CONSUMER_PROBE); WRITE_ONCE(link->status, DL_STATE_ACTIVE); @@ -1592,7 +1625,7 @@ static int __init fw_devlink_setup(char *arg) } early_param("fw_devlink", fw_devlink_setup); -static bool fw_devlink_strict; +static bool fw_devlink_strict = true; static int __init fw_devlink_strict_setup(char *arg) { return strtobool(arg, &fw_devlink_strict); @@ -1666,6 +1699,62 @@ void fw_devlink_drivers_done(void) device_links_write_unlock(); } +/** + * wait_for_init_devices_probe - Try to probe any device needed for init + * + * Some devices might need to be probed and bound successfully before the kernel + * boot sequence can finish and move on to init/userspace. For example, a + * network interface might need to be bound to be able to mount a NFS rootfs. + * + * With fw_devlink=on by default, some of these devices might be blocked from + * probing because they are waiting on a optional supplier that doesn't have a + * driver. While fw_devlink will eventually identify such devices and unblock + * the probing automatically, it might be too late by the time it unblocks the + * probing of devices. For example, the IP4 autoconfig might timeout before + * fw_devlink unblocks probing of the network interface. + * + * This function is available to temporarily try and probe all devices that have + * a driver even if some of their suppliers haven't been added or don't have + * drivers. 
+ * + * The drivers can then decide which of the suppliers are optional vs mandatory + * and probe the device if possible. By the time this function returns, all such + * "best effort" probes are guaranteed to be completed. If a device successfully + * probes in this mode, we delete all fw_devlink discovered dependencies of that + * device where the supplier hasn't yet probed successfully because they have to + * be optional dependencies. + * + * Any devices that didn't successfully probe go back to being treated as if + * this function was never called. + * + * This also means that some devices that aren't needed for init and could have + * waited for their optional supplier to probe (when the supplier's module is + * loaded later on) would end up probing prematurely with limited functionality. + * So call this function only when boot would fail without it. + */ +void __init wait_for_init_devices_probe(void) +{ + if (!fw_devlink_flags || fw_devlink_is_permissive()) + return; + + /* + * Wait for all ongoing probes to finish so that the "best effort" is + * only applied to devices that can't probe otherwise. + */ + wait_for_device_probe(); + + pr_info("Trying to probe devices needed for running init ...\n"); + fw_devlink_best_effort = true; + driver_deferred_probe_trigger(); + + /* + * Wait for all "best effort" probes to finish before going back to + * normal enforcement. + */ + wait_for_device_probe(); + fw_devlink_best_effort = false; +} + static void fw_devlink_unblock_consumers(struct device *dev) { struct device_link *link; @@ -3843,6 +3932,26 @@ struct device *device_find_child_by_name(struct device *parent, } EXPORT_SYMBOL_GPL(device_find_child_by_name); +static int match_any(struct device *dev, void *unused) +{ + return 1; +} + +/** + * device_find_any_child - device iterator for locating a child device, if any. + * @parent: parent struct device + * + * This is similar to the device_find_child() function above, but it + * returns a reference to a child device, if any. + * + * NOTE: you will need to drop the reference with put_device() after use. 
+ */ +struct device *device_find_any_child(struct device *parent) +{ + return device_find_child(parent, NULL, match_any); +} +EXPORT_SYMBOL_GPL(device_find_any_child); + int __init devices_init(void) { devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index a97776ea9d99..4c98849577d4 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -570,6 +570,12 @@ ssize_t __weak cpu_show_mmio_stale_data(struct device *dev, return sysfs_emit(buf, "Not affected\n"); } +ssize_t __weak cpu_show_retbleed(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -580,6 +586,7 @@ static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); +static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -592,6 +599,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_itlb_multihit.attr, &dev_attr_srbds.attr, &dev_attr_mmio_stale_data.attr, + &dev_attr_retbleed.attr, NULL }; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 11b0fb6414d3..70f79fc71539 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -172,7 +172,7 @@ static bool driver_deferred_probe_enable; * changes in the midst of a probe, then deferred processing should be triggered * again. */ -static void driver_deferred_probe_trigger(void) +void driver_deferred_probe_trigger(void) { if (!driver_deferred_probe_enable) return; @@ -256,7 +256,12 @@ static int deferred_devs_show(struct seq_file *s, void *data) } DEFINE_SHOW_ATTRIBUTE(deferred_devs); +#ifdef CONFIG_MODULES +int driver_deferred_probe_timeout = 10; +#else int driver_deferred_probe_timeout; +#endif + EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout); static int __init deferred_probe_timeout_setup(char *str) @@ -269,42 +274,12 @@ static int __init deferred_probe_timeout_setup(char *str) } __setup("deferred_probe_timeout=", deferred_probe_timeout_setup); -/** - * driver_deferred_probe_check_state() - Check deferred probe state - * @dev: device to check - * - * Return: - * * -ENODEV if initcalls have completed and modules are disabled. - * * -ETIMEDOUT if the deferred probe timeout was set and has expired - * and modules are enabled. - * * -EPROBE_DEFER in other cases. - * - * Drivers or subsystems can opt-in to calling this function instead of directly - * returning -EPROBE_DEFER. 
- */ -int driver_deferred_probe_check_state(struct device *dev) -{ - if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) { - dev_warn(dev, "ignoring dependency for device, assuming no driver\n"); - return -ENODEV; - } - - if (!driver_deferred_probe_timeout && initcalls_done) { - dev_warn(dev, "deferred probe timeout, ignoring dependency\n"); - return -ETIMEDOUT; - } - - return -EPROBE_DEFER; -} -EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state); - static void deferred_probe_timeout_work_func(struct work_struct *work) { struct device_private *p; fw_devlink_drivers_done(); - driver_deferred_probe_timeout = 0; driver_deferred_probe_trigger(); flush_work(&deferred_probe_work); @@ -580,7 +555,7 @@ static int really_probe(struct device *dev, struct device_driver *drv) { bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) && !drv->suppress_bind_attrs; - int ret; + int ret, link_ret; if (defer_all_probes) { /* @@ -592,9 +567,9 @@ static int really_probe(struct device *dev, struct device_driver *drv) return -EPROBE_DEFER; } - ret = device_links_check_suppliers(dev); - if (ret) - return ret; + link_ret = device_links_check_suppliers(dev); + if (link_ret == -EPROBE_DEFER) + return link_ret; pr_debug("bus: '%s': %s: probing driver %s with device %s\n", drv->bus->name, __func__, drv->name, dev_name(dev)); @@ -634,6 +609,15 @@ re_probe: ret = call_driver_probe(dev, drv); if (ret) { /* + * If fw_devlink_best_effort is active (denoted by -EAGAIN), the + * device might actually probe properly once some of its missing + * suppliers have probed. So, treat this as if the driver + * returned -EPROBE_DEFER. + */ + if (link_ret == -EAGAIN) + ret = -EPROBE_DEFER; + + /* * Return probe errors as positive values so that the callers * can distinguish them from other errors. 
*/ @@ -1115,6 +1099,7 @@ static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie) static int __driver_attach(struct device *dev, void *data) { struct device_driver *drv = data; + bool async = false; int ret; /* @@ -1153,9 +1138,11 @@ static int __driver_attach(struct device *dev, void *data) if (!dev->driver && !dev->p->async_driver) { get_device(dev); dev->p->async_driver = drv; - async_schedule_dev(__driver_attach_async_helper, dev); + async = true; } device_unlock(dev); + if (async) + async_schedule_dev(__driver_attach_async_helper, dev); return 0; } diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 8a3ddbae3b70..e4bffeabf344 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -482,6 +482,7 @@ int __init devtmpfs_init(void) if (err) { printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err); unregister_filesystem(&dev_fs_type); + thread = NULL; return err; } diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index ac3f34e80194..7c3590fd97c2 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -435,11 +435,11 @@ static int fw_decompress_xz_pages(struct device *dev, struct fw_priv *fw_priv, /* decompress onto the new allocated page */ page = fw_priv->pages[fw_priv->nr_pages - 1]; - xz_buf.out = kmap(page); + xz_buf.out = kmap_local_page(page); xz_buf.out_pos = 0; xz_buf.out_size = PAGE_SIZE; xz_ret = xz_dec_run(xz_dec, &xz_buf); - kunmap(page); + kunmap_local(xz_buf.out); fw_priv->size += xz_buf.out_pos; /* partial decompression means either end or error */ if (xz_buf.out_pos != PAGE_SIZE) diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c index 5b0b85b70b6f..77bad32c481a 100644 --- a/drivers/base/firmware_loader/sysfs.c +++ b/drivers/base/firmware_loader/sysfs.c @@ -242,19 +242,17 @@ static void firmware_rw(struct fw_priv *fw_priv, char *buffer, loff_t offset, size_t count, bool read) { while (count) { - void *page_data; int page_nr = offset >> PAGE_SHIFT; int page_ofs = offset & (PAGE_SIZE - 1); int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); - page_data = kmap(fw_priv->pages[page_nr]); - if (read) - memcpy(buffer, page_data + page_ofs, page_cnt); + memcpy_from_page(buffer, fw_priv->pages[page_nr], + page_ofs, page_cnt); else - memcpy(page_data + page_ofs, buffer, page_cnt); + memcpy_to_page(fw_priv->pages[page_nr], page_ofs, + buffer, page_cnt); - kunmap(fw_priv->pages[page_nr]); buffer += page_cnt; offset += page_cnt; count -= page_cnt; diff --git a/drivers/base/node.c b/drivers/base/node.c index 0ac6376ef7a1..eb0f43784c2b 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -45,7 +45,7 @@ static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj, return n; } -static BIN_ATTR_RO(cpumap, 0); +static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES); static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, @@ -66,7 +66,7 @@ static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj, return n; } -static BIN_ATTR_RO(cpulist, 0); +static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES); /** * struct node_access_nodes - Access class device to hold user visible diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 739e52cd4aba..5a2e0232862e 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -222,6 +222,9 @@ static void genpd_debug_remove(struct generic_pm_domain *genpd) 
{ struct dentry *d; + if (!genpd_debugfs_dir) + return; + d = debugfs_lookup(genpd->name, genpd_debugfs_dir); debugfs_remove(d); } @@ -2730,7 +2733,7 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev, mutex_unlock(&gpd_list_lock); dev_dbg(dev, "%s() failed to find PM domain: %ld\n", __func__, PTR_ERR(pd)); - return driver_deferred_probe_check_state(base_dev); + return -ENODEV; } dev_dbg(dev, "adding to PM domain %s\n", pd->name); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 949907e2e242..997be3ac20a7 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1862,10 +1862,13 @@ int pm_runtime_force_suspend(struct device *dev) callback = RPM_GET_CALLBACK(dev, runtime_suspend); + dev_pm_enable_wake_irq_check(dev, true); ret = callback ? callback(dev) : 0; if (ret) goto err; + dev_pm_enable_wake_irq_complete(dev); + /* * If the device can stay in suspend after the system-wide transition * to the working state that will follow, drop the children counter of @@ -1882,6 +1885,7 @@ int pm_runtime_force_suspend(struct device *dev) return 0; err: + dev_pm_disable_wake_irq_check(dev, true); pm_runtime_enable(dev); return ret; } @@ -1915,9 +1919,11 @@ int pm_runtime_force_resume(struct device *dev) callback = RPM_GET_CALLBACK(dev, runtime_resume); + dev_pm_disable_wake_irq_check(dev, false); ret = callback ? callback(dev) : 0; if (ret) { pm_runtime_set_suspended(dev); + dev_pm_enable_wake_irq_check(dev, false); goto out; } diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 11a4ffe91367..e3befa2c1b66 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -501,36 +501,6 @@ void device_set_wakeup_capable(struct device *dev, bool capable) EXPORT_SYMBOL_GPL(device_set_wakeup_capable); /** - * device_init_wakeup - Device wakeup initialization. - * @dev: Device to handle. - * @enable: Whether or not to enable @dev as a wakeup device. - * - * By default, most devices should leave wakeup disabled. The exceptions are - * devices that everyone expects to be wakeup sources: keyboards, power buttons, - * possibly network interfaces, etc. Also, devices that don't generate their - * own wakeup requests but merely forward requests from one bus to another - * (like PCI bridges) should have wakeup enabled by default. - */ -int device_init_wakeup(struct device *dev, bool enable) -{ - int ret = 0; - - if (!dev) - return -EINVAL; - - if (enable) { - device_set_wakeup_capable(dev, true); - ret = device_wakeup_enable(dev); - } else { - device_wakeup_disable(dev); - device_set_wakeup_capable(dev, false); - } - - return ret; -} -EXPORT_SYMBOL_GPL(device_init_wakeup); - -/** * device_set_wakeup_enable - Enable or disable a device to wake up the system. * @dev: Device to handle. 
* @enable: enable/disable flag diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index d0f5bc827978..362e043e26d8 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -133,6 +133,12 @@ int regcache_init(struct regmap *map, const struct regmap_config *config) return -EINVAL; } + if (config->num_reg_defaults && !config->reg_defaults) { + dev_err(map->dev, + "Register defaults number are set without the reg!\n"); + return -EINVAL; + } + for (i = 0; i < config->num_reg_defaults; i++) if (config->reg_defaults[i].reg % map->reg_stride) return -EINVAL; @@ -495,7 +501,8 @@ EXPORT_SYMBOL_GPL(regcache_drop_region); void regcache_cache_only(struct regmap *map, bool enable) { map->lock(map->lock_arg); - WARN_ON(map->cache_bypass && enable); + WARN_ON(map->cache_type != REGCACHE_NONE && + map->cache_bypass && enable); map->cache_only = enable; trace_regmap_cache_only(map, enable); map->unlock(map->lock_arg); @@ -531,7 +538,7 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty); * @enable: flag if changes should not be written to the cache * * When a register map is marked with the cache bypass option, writes - * to the register map API will only update the hardware and not the + * to the register map API will only update the hardware and not * the cache directly. This is useful when syncing the cache back to * the hardware. */ diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index a6db605707b0..4ef9488d05cd 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -30,6 +30,9 @@ struct regmap_irq_chip_data { int irq; int wake_count; + unsigned int mask_base; + unsigned int unmask_base; + void *status_reg_buf; unsigned int *main_status_buf; unsigned int *status_buf; @@ -39,33 +42,15 @@ struct regmap_irq_chip_data { unsigned int *type_buf; unsigned int *type_buf_def; unsigned int **virt_buf; + unsigned int **config_buf; unsigned int irq_reg_stride; - unsigned int type_reg_stride; - bool clear_status:1; -}; + unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data, + unsigned int base, int index); -static int sub_irq_reg(struct regmap_irq_chip_data *data, - unsigned int base_reg, int i) -{ - const struct regmap_irq_chip *chip = data->chip; - struct regmap *map = data->map; - struct regmap_irq_sub_irq_map *subreg; - unsigned int offset; - int reg = 0; - - if (!chip->sub_reg_offsets || !chip->not_fixed_stride) { - /* Assume linear mapping */ - reg = base_reg + (i * map->reg_stride * data->irq_reg_stride); - } else { - subreg = &chip->sub_reg_offsets[i]; - offset = subreg->offset[0]; - reg = base_reg + offset; - } - - return reg; -} + unsigned int clear_status:1; +}; static inline const struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data, @@ -74,21 +59,25 @@ struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data, return &data->chip->irqs[irq]; } -static void regmap_irq_lock(struct irq_data *data) +static bool regmap_irq_can_bulk_read_status(struct regmap_irq_chip_data *data) { - struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + struct regmap *map = data->map; - mutex_lock(&d->lock); + /* + * While possible that a user-defined ->get_irq_reg() callback might + * be linear enough to support bulk reads, most of the time it won't. + * Therefore only allow them if the default callback is being used. 
+ */ + return data->irq_reg_stride == 1 && map->reg_stride == 1 && + data->get_irq_reg == regmap_irq_get_irq_reg_linear && + !map->use_single_read; } -static int regmap_irq_update_bits(struct regmap_irq_chip_data *d, - unsigned int reg, unsigned int mask, - unsigned int val) +static void regmap_irq_lock(struct irq_data *data) { - if (d->chip->mask_writeonly) - return regmap_write_bits(d->map, reg, mask, val); - else - return regmap_update_bits(d->map, reg, mask, val); + struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + + mutex_lock(&d->lock); } static void regmap_irq_sync_unlock(struct irq_data *data) @@ -97,7 +86,6 @@ static void regmap_irq_sync_unlock(struct irq_data *data) struct regmap *map = d->map; int i, j, ret; u32 reg; - u32 unmask_offset; u32 val; if (d->chip->runtime_pm) { @@ -109,7 +97,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data) if (d->clear_status) { for (i = 0; i < d->chip->num_regs; i++) { - reg = sub_irq_reg(d, d->chip->status_base, i); + reg = d->get_irq_reg(d, d->chip->status_base, i); ret = regmap_read(map, reg, &val); if (ret) @@ -126,44 +114,32 @@ static void regmap_irq_sync_unlock(struct irq_data *data) * suppress pointless writes. */ for (i = 0; i < d->chip->num_regs; i++) { - if (!d->chip->mask_base) - continue; + if (d->mask_base) { + reg = d->get_irq_reg(d, d->mask_base, i); + ret = regmap_update_bits(d->map, reg, + d->mask_buf_def[i], d->mask_buf[i]); + if (ret) + dev_err(d->map->dev, "Failed to sync masks in %x\n", + reg); + } - reg = sub_irq_reg(d, d->chip->mask_base, i); - if (d->chip->mask_invert) { - ret = regmap_irq_update_bits(d, reg, - d->mask_buf_def[i], ~d->mask_buf[i]); - } else if (d->chip->unmask_base) { - /* set mask with mask_base register */ - ret = regmap_irq_update_bits(d, reg, + if (d->unmask_base) { + reg = d->get_irq_reg(d, d->unmask_base, i); + ret = regmap_update_bits(d->map, reg, d->mask_buf_def[i], ~d->mask_buf[i]); - if (ret < 0) - dev_err(d->map->dev, - "Failed to sync unmasks in %x\n", + if (ret) + dev_err(d->map->dev, "Failed to sync masks in %x\n", reg); - unmask_offset = d->chip->unmask_base - - d->chip->mask_base; - /* clear mask with unmask_base register */ - ret = regmap_irq_update_bits(d, - reg + unmask_offset, - d->mask_buf_def[i], - d->mask_buf[i]); - } else { - ret = regmap_irq_update_bits(d, reg, - d->mask_buf_def[i], d->mask_buf[i]); } - if (ret != 0) - dev_err(d->map->dev, "Failed to sync masks in %x\n", - reg); - reg = sub_irq_reg(d, d->chip->wake_base, i); + reg = d->get_irq_reg(d, d->chip->wake_base, i); if (d->wake_buf) { if (d->chip->wake_invert) - ret = regmap_irq_update_bits(d, reg, + ret = regmap_update_bits(d->map, reg, d->mask_buf_def[i], ~d->wake_buf[i]); else - ret = regmap_irq_update_bits(d, reg, + ret = regmap_update_bits(d->map, reg, d->mask_buf_def[i], d->wake_buf[i]); if (ret != 0) @@ -180,7 +156,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data) * it'll be ignored in irq handler, then may introduce irq storm */ if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) { - reg = sub_irq_reg(d, d->chip->ack_base, i); + reg = d->get_irq_reg(d, d->chip->ack_base, i); /* some chips ack by write 0 */ if (d->chip->ack_invert) @@ -204,12 +180,12 @@ static void regmap_irq_sync_unlock(struct irq_data *data) for (i = 0; i < d->chip->num_type_reg; i++) { if (!d->type_buf_def[i]) continue; - reg = sub_irq_reg(d, d->chip->type_base, i); + reg = d->get_irq_reg(d, d->chip->type_base, i); if (d->chip->type_invert) - ret = regmap_irq_update_bits(d, reg, + ret = 
regmap_update_bits(d->map, reg, d->type_buf_def[i], ~d->type_buf[i]); else - ret = regmap_irq_update_bits(d, reg, + ret = regmap_update_bits(d->map, reg, d->type_buf_def[i], d->type_buf[i]); if (ret != 0) dev_err(d->map->dev, "Failed to sync type in %x\n", @@ -220,8 +196,8 @@ static void regmap_irq_sync_unlock(struct irq_data *data) if (d->chip->num_virt_regs) { for (i = 0; i < d->chip->num_virt_regs; i++) { for (j = 0; j < d->chip->num_regs; j++) { - reg = sub_irq_reg(d, d->chip->virt_reg_base[i], - j); + reg = d->get_irq_reg(d, d->chip->virt_reg_base[i], + j); ret = regmap_write(map, reg, d->virt_buf[i][j]); if (ret != 0) dev_err(d->map->dev, @@ -231,6 +207,17 @@ static void regmap_irq_sync_unlock(struct irq_data *data) } } + for (i = 0; i < d->chip->num_config_bases; i++) { + for (j = 0; j < d->chip->num_config_regs; j++) { + reg = d->get_irq_reg(d, d->chip->config_base[i], j); + ret = regmap_write(map, reg, d->config_buf[i][j]); + if (ret) + dev_err(d->map->dev, + "Failed to write config %x: %d\n", + reg, ret); + } + } + if (d->chip->runtime_pm) pm_runtime_put(map->dev); @@ -253,22 +240,19 @@ static void regmap_irq_enable(struct irq_data *data) struct regmap *map = d->map; const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); unsigned int reg = irq_data->reg_offset / map->reg_stride; - unsigned int mask, type; - - type = irq_data->type.type_falling_val | irq_data->type.type_rising_val; + unsigned int mask; /* * The type_in_mask flag means that the underlying hardware uses - * separate mask bits for rising and falling edge interrupts, but - * we want to make them into a single virtual interrupt with - * configurable edge. + * separate mask bits for each interrupt trigger type, but we want + * to have a single logical interrupt with a configurable type. * - * If the interrupt we're enabling defines the falling or rising - * masks then instead of using the regular mask bits for this - * interrupt, use the value previously written to the type buffer - * at the corresponding offset in regmap_irq_set_type(). + * If the interrupt we're enabling defines any supported types + * then instead of using the regular mask bits for this interrupt, + * use the value previously written to the type buffer at the + * corresponding offset in regmap_irq_set_type(). 
*/ - if (d->chip->type_in_mask && type) + if (d->chip->type_in_mask && irq_data->type.types_supported) mask = d->type_buf[reg] & irq_data->mask; else mask = irq_data->mask; @@ -293,7 +277,7 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type) struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); struct regmap *map = d->map; const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); - int reg; + int reg, ret; const struct regmap_irq_type *t = &irq_data->type; if ((t->types_supported & type) != type) @@ -333,9 +317,19 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type) return -EINVAL; } - if (d->chip->set_type_virt) - return d->chip->set_type_virt(d->virt_buf, type, data->hwirq, - reg); + if (d->chip->set_type_virt) { + ret = d->chip->set_type_virt(d->virt_buf, type, data->hwirq, + reg); + if (ret) + return ret; + } + + if (d->chip->set_type_config) { + ret = d->chip->set_type_config(d->config_buf, type, + irq_data, reg); + if (ret) + return ret; + } return 0; } @@ -376,14 +370,17 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, const struct regmap_irq_chip *chip = data->chip; struct regmap *map = data->map; struct regmap_irq_sub_irq_map *subreg; + unsigned int reg; int i, ret = 0; if (!chip->sub_reg_offsets) { - /* Assume linear mapping */ - ret = regmap_read(map, chip->status_base + - (b * map->reg_stride * data->irq_reg_stride), - &data->status_buf[b]); + reg = data->get_irq_reg(data, chip->status_base, b); + ret = regmap_read(map, reg, &data->status_buf[b]); } else { + /* + * Note we can't use ->get_irq_reg() here because the offsets + * in 'subreg' are *not* interchangeable with indices. + */ subreg = &chip->sub_reg_offsets[b]; for (i = 0; i < subreg->num_regs; i++) { unsigned int offset = subreg->offset[i]; @@ -449,10 +446,18 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) * sake of simplicity. and add bulk reads only if needed */ for (i = 0; i < chip->num_main_regs; i++) { - ret = regmap_read(map, chip->main_status + - (i * map->reg_stride - * data->irq_reg_stride), - &data->main_status_buf[i]); + /* + * For not_fixed_stride, don't use ->get_irq_reg(). + * It would produce an incorrect result. 
+ */ + if (data->chip->not_fixed_stride) + reg = chip->main_status + + i * map->reg_stride * data->irq_reg_stride; + else + reg = data->get_irq_reg(data, + chip->main_status, i); + + ret = regmap_read(map, reg, &data->main_status_buf[i]); if (ret) { dev_err(map->dev, "Failed to read IRQ status %d\n", @@ -481,8 +486,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) } } - } else if (!map->use_single_read && map->reg_stride == 1 && - data->irq_reg_stride == 1) { + } else if (regmap_irq_can_bulk_read_status(data)) { u8 *buf8 = data->status_reg_buf; u16 *buf16 = data->status_reg_buf; @@ -518,7 +522,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) } else { for (i = 0; i < data->chip->num_regs; i++) { - unsigned int reg = sub_irq_reg(data, + unsigned int reg = data->get_irq_reg(data, data->chip->status_base, i); ret = regmap_read(map, reg, &data->status_buf[i]); @@ -546,7 +550,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) data->status_buf[i] &= ~data->mask_buf[i]; if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) { - reg = sub_irq_reg(data, data->chip->ack_base, i); + reg = data->get_irq_reg(data, data->chip->ack_base, i); if (chip->ack_invert) ret = regmap_write(map, reg, @@ -607,6 +611,91 @@ static const struct irq_domain_ops regmap_domain_ops = { }; /** + * regmap_irq_get_irq_reg_linear() - Linear IRQ register mapping callback. + * @data: Data for the &struct regmap_irq_chip + * @base: Base register + * @index: Register index + * + * Returns the register address corresponding to the given @base and @index + * by the formula ``base + index * regmap_stride * irq_reg_stride``. + */ +unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data, + unsigned int base, int index) +{ + const struct regmap_irq_chip *chip = data->chip; + struct regmap *map = data->map; + + /* + * FIXME: This is for backward compatibility and should be removed + * when not_fixed_stride is dropped (it's only used by qcom-pm8008). + */ + if (chip->not_fixed_stride && chip->sub_reg_offsets) { + struct regmap_irq_sub_irq_map *subreg; + + subreg = &chip->sub_reg_offsets[0]; + return base + subreg->offset[0]; + } + + return base + index * map->reg_stride * data->irq_reg_stride; +} +EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear); + +/** + * regmap_irq_set_type_config_simple() - Simple IRQ type configuration callback. + * @buf: Buffer containing configuration register values, this is a 2D array of + * `num_config_bases` rows, each of `num_config_regs` elements. + * @type: The requested IRQ type. + * @irq_data: The IRQ being configured. + * @idx: Index of the irq's config registers within each array `buf[i]` + * + * This is a &struct regmap_irq_chip->set_type_config callback suitable for + * chips with one config register. Register values are updated according to + * the &struct regmap_irq_type data associated with an IRQ. 
+ */ +int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type, + const struct regmap_irq *irq_data, int idx) +{ + const struct regmap_irq_type *t = &irq_data->type; + + if (t->type_reg_mask) + buf[0][idx] &= ~t->type_reg_mask; + else + buf[0][idx] &= ~(t->type_falling_val | + t->type_rising_val | + t->type_level_low_val | + t->type_level_high_val); + + switch (type) { + case IRQ_TYPE_EDGE_FALLING: + buf[0][idx] |= t->type_falling_val; + break; + + case IRQ_TYPE_EDGE_RISING: + buf[0][idx] |= t->type_rising_val; + break; + + case IRQ_TYPE_EDGE_BOTH: + buf[0][idx] |= (t->type_falling_val | + t->type_rising_val); + break; + + case IRQ_TYPE_LEVEL_HIGH: + buf[0][idx] |= t->type_level_high_val; + break; + + case IRQ_TYPE_LEVEL_LOW: + buf[0][idx] |= t->type_level_low_val; + break; + + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple); + +/** * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling * * @fwnode: The firmware node where the IRQ domain should be added to. @@ -634,7 +723,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, int ret = -ENOMEM; int num_type_reg; u32 reg; - u32 unmask_offset; if (chip->num_regs <= 0) return -EINVAL; @@ -651,11 +739,19 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, } if (chip->not_fixed_stride) { + dev_warn(map->dev, "not_fixed_stride is deprecated; use ->get_irq_reg() instead"); + for (i = 0; i < chip->num_regs; i++) if (chip->sub_reg_offsets[i].num_regs != 1) return -EINVAL; } + if (chip->num_type_reg) + dev_warn(map->dev, "type registers are deprecated; use config registers instead"); + + if (chip->num_virt_regs || chip->virt_reg_base || chip->set_type_virt) + dev_warn(map->dev, "virtual registers are deprecated; use config registers instead"); + if (irq_base) { irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0); if (irq_base < 0) { @@ -671,30 +767,30 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (chip->num_main_regs) { d->main_status_buf = kcalloc(chip->num_main_regs, - sizeof(unsigned int), + sizeof(*d->main_status_buf), GFP_KERNEL); if (!d->main_status_buf) goto err_alloc; } - d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int), + d->status_buf = kcalloc(chip->num_regs, sizeof(*d->status_buf), GFP_KERNEL); if (!d->status_buf) goto err_alloc; - d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int), + d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf), GFP_KERNEL); if (!d->mask_buf) goto err_alloc; - d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int), + d->mask_buf_def = kcalloc(chip->num_regs, sizeof(*d->mask_buf_def), GFP_KERNEL); if (!d->mask_buf_def) goto err_alloc; if (chip->wake_base) { - d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int), + d->wake_buf = kcalloc(chip->num_regs, sizeof(*d->wake_buf), GFP_KERNEL); if (!d->wake_buf) goto err_alloc; @@ -703,11 +799,11 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, num_type_reg = chip->type_in_mask ? 
chip->num_regs : chip->num_type_reg; if (num_type_reg) { d->type_buf_def = kcalloc(num_type_reg, - sizeof(unsigned int), GFP_KERNEL); + sizeof(*d->type_buf_def), GFP_KERNEL); if (!d->type_buf_def) goto err_alloc; - d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int), + d->type_buf = kcalloc(num_type_reg, sizeof(*d->type_buf), GFP_KERNEL); if (!d->type_buf) goto err_alloc; @@ -724,13 +820,31 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, for (i = 0; i < chip->num_virt_regs; i++) { d->virt_buf[i] = kcalloc(chip->num_regs, - sizeof(unsigned int), + sizeof(**d->virt_buf), GFP_KERNEL); if (!d->virt_buf[i]) goto err_alloc; } } + if (chip->num_config_bases && chip->num_config_regs) { + /* + * Create config_buf[num_config_bases][num_config_regs] + */ + d->config_buf = kcalloc(chip->num_config_bases, + sizeof(*d->config_buf), GFP_KERNEL); + if (!d->config_buf) + goto err_alloc; + + for (i = 0; i < chip->num_config_regs; i++) { + d->config_buf[i] = kcalloc(chip->num_config_regs, + sizeof(**d->config_buf), + GFP_KERNEL); + if (!d->config_buf[i]) + goto err_alloc; + } + } + d->irq_chip = regmap_irq_chip; d->irq_chip.name = chip->name; d->irq = irq; @@ -738,18 +852,53 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, d->chip = chip; d->irq_base = irq_base; + if (chip->mask_base && chip->unmask_base && + !chip->mask_unmask_non_inverted) { + /* + * Chips that specify both mask_base and unmask_base used to + * get inverted mask behavior by default, with no way to ask + * for the normal, non-inverted behavior. This "inverted by + * default" behavior is deprecated, but we have to support it + * until existing drivers have been fixed. + * + * Existing drivers should be updated by swapping mask_base + * and unmask_base and setting mask_unmask_non_inverted=true. + * New drivers should always set the flag. + */ + dev_warn(map->dev, "mask_base and unmask_base are inverted, please fix it"); + + /* Might as well warn about mask_invert while we're at it... */ + if (chip->mask_invert) + dev_warn(map->dev, "mask_invert=true ignored"); + + d->mask_base = chip->unmask_base; + d->unmask_base = chip->mask_base; + } else if (chip->mask_invert) { + /* + * Swap the roles of mask_base and unmask_base if the bits are + * inverted. This is deprecated, drivers should use unmask_base + * directly. 
+ */ + dev_warn(map->dev, "mask_invert=true is deprecated; please switch to unmask_base"); + + d->mask_base = chip->unmask_base; + d->unmask_base = chip->mask_base; + } else { + d->mask_base = chip->mask_base; + d->unmask_base = chip->unmask_base; + } + if (chip->irq_reg_stride) d->irq_reg_stride = chip->irq_reg_stride; else d->irq_reg_stride = 1; - if (chip->type_reg_stride) - d->type_reg_stride = chip->type_reg_stride; + if (chip->get_irq_reg) + d->get_irq_reg = chip->get_irq_reg; else - d->type_reg_stride = 1; + d->get_irq_reg = regmap_irq_get_irq_reg_linear; - if (!map->use_single_read && map->reg_stride == 1 && - d->irq_reg_stride == 1) { + if (regmap_irq_can_bulk_read_status(d)) { d->status_reg_buf = kmalloc_array(chip->num_regs, map->format.val_bytes, GFP_KERNEL); @@ -766,35 +915,34 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, /* Mask all the interrupts by default */ for (i = 0; i < chip->num_regs; i++) { d->mask_buf[i] = d->mask_buf_def[i]; - if (!chip->mask_base) - continue; - reg = sub_irq_reg(d, d->chip->mask_base, i); + if (d->mask_base) { + reg = d->get_irq_reg(d, d->mask_base, i); + ret = regmap_update_bits(d->map, reg, + d->mask_buf_def[i], d->mask_buf[i]); + if (ret) { + dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", + reg, ret); + goto err_alloc; + } + } - if (chip->mask_invert) - ret = regmap_irq_update_bits(d, reg, - d->mask_buf[i], ~d->mask_buf[i]); - else if (d->chip->unmask_base) { - unmask_offset = d->chip->unmask_base - - d->chip->mask_base; - ret = regmap_irq_update_bits(d, - reg + unmask_offset, - d->mask_buf[i], - d->mask_buf[i]); - } else - ret = regmap_irq_update_bits(d, reg, - d->mask_buf[i], d->mask_buf[i]); - if (ret != 0) { - dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", - reg, ret); - goto err_alloc; + if (d->unmask_base) { + reg = d->get_irq_reg(d, d->unmask_base, i); + ret = regmap_update_bits(d->map, reg, + d->mask_buf_def[i], ~d->mask_buf[i]); + if (ret) { + dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", + reg, ret); + goto err_alloc; + } } if (!chip->init_ack_masked) continue; /* Ack masked but set interrupts */ - reg = sub_irq_reg(d, d->chip->status_base, i); + reg = d->get_irq_reg(d, d->chip->status_base, i); ret = regmap_read(map, reg, &d->status_buf[i]); if (ret != 0) { dev_err(map->dev, "Failed to read IRQ status: %d\n", @@ -806,7 +954,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, d->status_buf[i] = ~d->status_buf[i]; if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) { - reg = sub_irq_reg(d, d->chip->ack_base, i); + reg = d->get_irq_reg(d, d->chip->ack_base, i); if (chip->ack_invert) ret = regmap_write(map, reg, ~(d->status_buf[i] & d->mask_buf[i])); @@ -831,14 +979,14 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (d->wake_buf) { for (i = 0; i < chip->num_regs; i++) { d->wake_buf[i] = d->mask_buf_def[i]; - reg = sub_irq_reg(d, d->chip->wake_base, i); + reg = d->get_irq_reg(d, d->chip->wake_base, i); if (chip->wake_invert) - ret = regmap_irq_update_bits(d, reg, + ret = regmap_update_bits(d->map, reg, d->mask_buf_def[i], 0); else - ret = regmap_irq_update_bits(d, reg, + ret = regmap_update_bits(d->map, reg, d->mask_buf_def[i], d->wake_buf[i]); if (ret != 0) { @@ -851,7 +999,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (chip->num_type_reg && !chip->type_in_mask) { for (i = 0; i < chip->num_type_reg; ++i) { - reg = sub_irq_reg(d, d->chip->type_base, i); + reg = d->get_irq_reg(d, d->chip->type_base, i); ret = regmap_read(map, reg, 
&d->type_buf_def[i]); @@ -907,6 +1055,11 @@ err_alloc: kfree(d->virt_buf[i]); kfree(d->virt_buf); } + if (d->config_buf) { + for (i = 0; i < chip->num_config_bases; i++) + kfree(d->config_buf[i]); + kfree(d->config_buf); + } kfree(d); return ret; } @@ -947,7 +1100,7 @@ EXPORT_SYMBOL_GPL(regmap_add_irq_chip); void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) { unsigned int virq; - int hwirq; + int i, hwirq; if (!d) return; @@ -977,6 +1130,11 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) kfree(d->mask_buf); kfree(d->status_reg_buf); kfree(d->status_buf); + if (d->config_buf) { + for (i = 0; i < d->chip->num_config_bases; i++) + kfree(d->config_buf[i]); + kfree(d->config_buf); + } kfree(d); } EXPORT_SYMBOL_GPL(regmap_del_irq_chip); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index c3517ccc3159..fee221c5008c 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -882,6 +882,8 @@ struct regmap *__regmap_init(struct device *dev, if (config && config->read && config->write) { map->reg_read = _regmap_bus_read; + if (config->reg_update_bits) + map->reg_update_bits = config->reg_update_bits; /* Bulk read/write */ map->read = config->read; @@ -1298,6 +1300,9 @@ static void regmap_field_init(struct regmap_field *rm_field, rm_field->reg = reg_field.reg; rm_field->shift = reg_field.lsb; rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb); + + WARN_ONCE(rm_field->mask == 0, "invalid empty mask defined\n"); + rm_field->id_size = reg_field.id_size; rm_field->id_offset = reg_field.id_offset; } @@ -2219,6 +2224,28 @@ int regmap_field_update_bits_base(struct regmap_field *field, EXPORT_SYMBOL_GPL(regmap_field_update_bits_base); /** + * regmap_field_test_bits() - Check if all specified bits are set in a + * register field. + * + * @field: Register field to operate on + * @bits: Bits to test + * + * Returns -1 if the underlying regmap_field_read() fails, 0 if at least one of the + * tested bits is not set and 1 if all tested bits are set. 
+ */ +int regmap_field_test_bits(struct regmap_field *field, unsigned int bits) +{ + unsigned int val, ret; + + ret = regmap_field_read(field, &val); + if (ret) + return ret; + + return (val & bits) == bits; +} +EXPORT_SYMBOL_GPL(regmap_field_test_bits); + +/** * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a * register field with port ID * diff --git a/drivers/base/topology.c b/drivers/base/topology.c index ac6ad9ab67f9..89f98be5c5b9 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -62,47 +62,47 @@ define_id_show_func(ppin, "0x%llx"); static DEVICE_ATTR_ADMIN_RO(ppin); define_siblings_read_func(thread_siblings, sibling_cpumask); -static BIN_ATTR_RO(thread_siblings, 0); -static BIN_ATTR_RO(thread_siblings_list, 0); +static BIN_ATTR_RO(thread_siblings, CPUMAP_FILE_MAX_BYTES); +static BIN_ATTR_RO(thread_siblings_list, CPULIST_FILE_MAX_BYTES); define_siblings_read_func(core_cpus, sibling_cpumask); -static BIN_ATTR_RO(core_cpus, 0); -static BIN_ATTR_RO(core_cpus_list, 0); +static BIN_ATTR_RO(core_cpus, CPUMAP_FILE_MAX_BYTES); +static BIN_ATTR_RO(core_cpus_list, CPULIST_FILE_MAX_BYTES); define_siblings_read_func(core_siblings, core_cpumask); -static BIN_ATTR_RO(core_siblings, 0); -static BIN_ATTR_RO(core_siblings_list, 0); +static BIN_ATTR_RO(core_siblings, CPUMAP_FILE_MAX_BYTES); +static BIN_ATTR_RO(core_siblings_list, CPULIST_FILE_MAX_BYTES); #ifdef TOPOLOGY_CLUSTER_SYSFS define_siblings_read_func(cluster_cpus, cluster_cpumask); -static BIN_ATTR_RO(cluster_cpus, 0); -static BIN_ATTR_RO(cluster_cpus_list, 0); +static BIN_ATTR_RO(cluster_cpus, CPUMAP_FILE_MAX_BYTES); +static BIN_ATTR_RO(cluster_cpus_list, CPULIST_FILE_MAX_BYTES); #endif #ifdef TOPOLOGY_DIE_SYSFS define_siblings_read_func(die_cpus, die_cpumask); -static BIN_ATTR_RO(die_cpus, 0); -static BIN_ATTR_RO(die_cpus_list, 0); +static BIN_ATTR_RO(die_cpus, CPUMAP_FILE_MAX_BYTES); +static BIN_ATTR_RO(die_cpus_list, CPULIST_FILE_MAX_BYTES); #endif define_siblings_read_func(package_cpus, core_cpumask); -static BIN_ATTR_RO(package_cpus, 0); -static BIN_ATTR_RO(package_cpus_list, 0); +static BIN_ATTR_RO(package_cpus, CPUMAP_FILE_MAX_BYTES); +static BIN_ATTR_RO(package_cpus_list, CPULIST_FILE_MAX_BYTES); #ifdef TOPOLOGY_BOOK_SYSFS define_id_show_func(book_id, "%d"); static DEVICE_ATTR_RO(book_id); define_siblings_read_func(book_siblings, book_cpumask); -static BIN_ATTR_RO(book_siblings, 0); -static BIN_ATTR_RO(book_siblings_list, 0); +static BIN_ATTR_RO(book_siblings, CPUMAP_FILE_MAX_BYTES); +static BIN_ATTR_RO(book_siblings_list, CPULIST_FILE_MAX_BYTES); #endif #ifdef TOPOLOGY_DRAWER_SYSFS define_id_show_func(drawer_id, "%d"); static DEVICE_ATTR_RO(drawer_id); define_siblings_read_func(drawer_siblings, drawer_cpumask); -static BIN_ATTR_RO(drawer_siblings, 0); -static BIN_ATTR_RO(drawer_siblings_list, 0); +static BIN_ATTR_RO(drawer_siblings, CPUMAP_FILE_MAX_BYTES); +static BIN_ATTR_RO(drawer_siblings_list, CPULIST_FILE_MAX_BYTES); #endif static struct bin_attribute *bin_attrs[] = { |
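For context, the regmap-irq portion of this diff deprecates mask_invert and the old implicitly-inverted handling of chips that set both mask_base and unmask_base, steering drivers toward an explicit unmask_base plus the mask_unmask_non_inverted flag. Below is a minimal sketch of what a new-style chip description might look like; the register addresses, IRQ layout, and names are hypothetical, chosen only to illustrate the flags discussed in the deprecation warnings above, and are not taken from this diff.

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical layout: one status, mask, unmask and ack register each. */
static const struct regmap_irq example_irqs[] = {
	REGMAP_IRQ_REG(0, 0, BIT(0)),	/* hwirq 0 -> bit 0 of register 0 */
	REGMAP_IRQ_REG(1, 0, BIT(1)),	/* hwirq 1 -> bit 1 of register 0 */
};

static const struct regmap_irq_chip example_irq_chip = {
	.name = "example",
	.status_base = 0x10,			/* hypothetical register addresses */
	.mask_base = 0x20,			/* writing 1 here masks an IRQ */
	.unmask_base = 0x30,			/* writing 1 here unmasks an IRQ */
	.mask_unmask_non_inverted = true,	/* opt in to the non-inverted semantics */
	.ack_base = 0x40,
	.num_regs = 1,
	.irqs = example_irqs,
	.num_irqs = ARRAY_SIZE(example_irqs),
};

A driver would then typically register this with devm_regmap_add_irq_chip(dev, map, irq, IRQF_ONESHOT, 0, &example_irq_chip, &irq_data); the sketch only covers the chip description, not the probe plumbing around it.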