| author | Greg Kroah-Hartman <[email protected]> | 2023-12-11 08:39:35 +0100 |
|---|---|---|
| committer | Greg Kroah-Hartman <[email protected]> | 2023-12-11 08:39:35 +0100 |
| commit | 0e42b5fee8a8c5bc173f702b0745da6d9329c714 (patch) | |
| tree | a71b0daded5764fcc33b88f9344872c13f4b488e /lib/group_cpus.c | |
| parent | 386a766c4169006d0e9df44823849930b8995e32 (diff) | |
| parent | a39b6ac3781d46ba18193c9dbb2110f31e9bffe9 (diff) | |
Merge 6.7-rc5 into char-misc-next
We need the char/misc fixes in here as well for testing and to build off
of.
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Diffstat (limited to 'lib/group_cpus.c')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | lib/group_cpus.c | 22 |

1 file changed, 16 insertions(+), 6 deletions(-)
```diff
diff --git a/lib/group_cpus.c b/lib/group_cpus.c
index aa3f6815bb12..ee272c4cefcc 100644
--- a/lib/group_cpus.c
+++ b/lib/group_cpus.c
@@ -366,13 +366,25 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
         if (!masks)
                 goto fail_node_to_cpumask;
 
-        /* Stabilize the cpumasks */
-        cpus_read_lock();
         build_node_to_cpumask(node_to_cpumask);
 
+        /*
+         * Make a local cache of 'cpu_present_mask', so the two stages
+         * spread can observe consistent 'cpu_present_mask' without holding
+         * cpu hotplug lock, then we can reduce deadlock risk with cpu
+         * hotplug code.
+         *
+         * Here CPU hotplug may happen when reading `cpu_present_mask`, and
+         * we can live with the case because it only affects that hotplug
+         * CPU is handled in the 1st or 2nd stage, and either way is correct
+         * from API user viewpoint since 2-stage spread is sort of
+         * optimization.
+         */
+        cpumask_copy(npresmsk, data_race(cpu_present_mask));
+
         /* grouping present CPUs first */
         ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
-                                  cpu_present_mask, nmsk, masks);
+                                  npresmsk, nmsk, masks);
         if (ret < 0)
                 goto fail_build_affinity;
         nr_present = ret;
@@ -387,15 +399,13 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
                 curgrp = 0;
         else
                 curgrp = nr_present;
-        cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+        cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk);
         ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
                                   npresmsk, nmsk, masks);
         if (ret >= 0)
                 nr_others = ret;
 
 fail_build_affinity:
-        cpus_read_unlock();
-
         if (ret >= 0)
                 WARN_ON(nr_present + nr_others < numgrps);
```
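For readers outside the kernel tree, here is a minimal userspace sketch of the pattern this hunk introduces: snapshot the present mask once, spread over the snapshot first, then derive the second-stage ("non-present") mask from the same snapshot rather than re-reading the live mask. The fixed-width mask type and the `spread_over()` helper below are illustrative stand-ins for the kernel's cpumask API, not code from this patch.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's cpumask type: one bit per CPU. */
typedef uint64_t cpu_mask_t;

/* 'volatile' mimics a mask that concurrent hotplug may change under us. */
static volatile cpu_mask_t cpu_present_mask = 0x0f;
static const cpu_mask_t    cpu_possible_mask = 0xff;

/* Stand-in for one spread stage: just count and report the CPUs covered. */
static int spread_over(cpu_mask_t mask, const char *stage)
{
        int n = 0;

        for (int cpu = 0; cpu < 64; cpu++)
                if (mask & (1ULL << cpu))
                        n++;
        printf("%s stage covers %d CPUs\n", stage, n);
        return n;
}

int main(void)
{
        /*
         * Snapshot the present mask once.  Both stages below consume this
         * snapshot, so a concurrent hotplug event can only move a CPU
         * between the two stages; it can never be lost or counted twice,
         * and no hotplug lock is needed.
         */
        cpu_mask_t present_snapshot = cpu_present_mask;

        spread_over(present_snapshot, "present");
        /* Second stage: non-present CPUs derived from the same snapshot. */
        spread_over(cpu_possible_mask & ~present_snapshot, "non-present");
        return 0;
}
```

Because both stages consume the same snapshot, a CPU hot-plugged midway is simply handled in whichever stage the snapshot assigns it to, which is the trade-off the new comment in the hunk describes. Dropping the cpus_read_lock()/cpus_read_unlock() pair is what takes group_cpus_evenly() out of the CPU hotplug locking order and removes the deadlock risk.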