| author | Dmitry Torokhov <[email protected]> | 2023-05-01 15:20:08 -0700 | 
|---|---|---|
| committer | Dmitry Torokhov <[email protected]> | 2023-05-01 15:20:08 -0700 | 
| commit | 9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e (patch) | |
| tree | d57f3a63479a07b4e0cece029886e76e04feb984 /kernel/sched/topology.c | |
| parent | 5dc63e56a9cf8df0b59c234a505a1653f1bdf885 (diff) | |
| parent | 53bea86b5712c7491bb3dae12e271666df0a308c (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 6.4 merge window.
Diffstat (limited to 'kernel/sched/topology.c')
| Mode | File | Lines |
|---|---|---|
| -rw-r--r-- | kernel/sched/topology.c | 99 |
1 file changed, 97 insertions, 2 deletions
```diff
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8739c2a5a54e..051aaf65c749 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -3,6 +3,8 @@
  * Scheduler topology setup/handling methods
  */
 
+#include <linux/bsearch.h>
+
 DEFINE_MUTEX(sched_domains_mutex);
 
 /* Protected by sched_domains_mutex: */
@@ -578,7 +580,7 @@ out:
  */
 struct root_domain def_root_domain;
 
-void init_defrootdomain(void)
+void __init init_defrootdomain(void)
 {
 	init_rootdomain(&def_root_domain);
 
@@ -2067,6 +2069,99 @@ unlock:
 	return found;
 }
 
+struct __cmp_key {
+	const struct cpumask *cpus;
+	struct cpumask ***masks;
+	int node;
+	int cpu;
+	int w;
+};
+
+static int hop_cmp(const void *a, const void *b)
+{
+	struct cpumask **prev_hop, **cur_hop = *(struct cpumask ***)b;
+	struct __cmp_key *k = (struct __cmp_key *)a;
+
+	if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu)
+		return 1;
+
+	if (b == k->masks) {
+		k->w = 0;
+		return 0;
+	}
+
+	prev_hop = *((struct cpumask ***)b - 1);
+	k->w = cpumask_weight_and(k->cpus, prev_hop[k->node]);
+	if (k->w <= k->cpu)
+		return 0;
+
+	return -1;
+}
+
+/*
+ * sched_numa_find_nth_cpu() - given the NUMA topology, find the Nth next cpu
+ *                             closest to @cpu from @cpumask.
+ * cpumask: cpumask to find a cpu from
+ * cpu: Nth cpu to find
+ *
+ * returns: cpu, or nr_cpu_ids when nothing found.
+ */
+int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
+{
+	struct __cmp_key k = { .cpus = cpus, .node = node, .cpu = cpu };
+	struct cpumask ***hop_masks;
+	int hop, ret = nr_cpu_ids;
+
+	rcu_read_lock();
+
+	k.masks = rcu_dereference(sched_domains_numa_masks);
+	if (!k.masks)
+		goto unlock;
+
+	hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp);
+	hop = hop_masks - k.masks;
+
+	ret = hop ?
+		cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) :
+		cpumask_nth_and(cpu, cpus, k.masks[0][node]);
+unlock:
+	rcu_read_unlock();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu);
+
+/**
+ * sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from
+ *                         @node
+ * @node: The node to count hops from.
+ * @hops: Include CPUs up to that many hops away. 0 means local node.
+ *
+ * Return: On success, a pointer to a cpumask of CPUs at most @hops away from
+ * @node, an error value otherwise.
+ *
+ * Requires rcu_lock to be held. Returned cpumask is only valid within that
+ * read-side section, copy it if required beyond that.
+ *
+ * Note that not all hops are equal in distance; see sched_init_numa() for how
+ * distances and masks are handled.
+ * Also note that this is a reflection of sched_domains_numa_masks, which may change
+ * during the lifetime of the system (offline nodes are taken out of the masks).
+ */
+const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops)
+{
+	struct cpumask ***masks;
+
+	if (node >= nr_node_ids || hops >= sched_domains_numa_levels)
+		return ERR_PTR(-EINVAL);
+
+	masks = rcu_dereference(sched_domains_numa_masks);
+	if (!masks)
+		return ERR_PTR(-EBUSY);
+
+	return masks[hops][node];
+}
+EXPORT_SYMBOL_GPL(sched_numa_hop_mask);
+
 #endif /* CONFIG_NUMA */
 
 static int __sdt_alloc(const struct cpumask *cpu_map)
@@ -2451,7 +2546,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
  * Set up scheduler domains and groups.  For now this just excludes isolated
  * CPUs, but could be used to exclude other special cases in the future.
  */
-int sched_init_domains(const struct cpumask *cpu_map)
+int __init sched_init_domains(const struct cpumask *cpu_map)
 {
 	int err;
 
```
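For context, a minimal sketch of how a caller might use the two helpers introduced above. Everything except sched_numa_find_nth_cpu() and sched_numa_hop_mask() is hypothetical: the function names, the queue-spreading scenario, and the pr_info() output are made up for illustration and are not part of this patch.

```c
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/topology.h>

/*
 * Hypothetical example: hand out one CPU per queue, in NUMA-distance
 * order from @node. sched_numa_find_nth_cpu() handles the RCU locking
 * internally and returns nr_cpu_ids when no suitable CPU is found.
 */
static void example_spread_queues(int node, unsigned int nr_queues)
{
	unsigned int q;

	for (q = 0; q < nr_queues; q++) {
		int cpu = sched_numa_find_nth_cpu(cpu_online_mask, q, node);

		if (cpu < nr_cpu_ids)
			pr_info("queue %u -> cpu %d\n", q, cpu);
	}
}

/*
 * Hypothetical example: walk the per-hop masks directly. The returned
 * mask is only valid inside the RCU read-side section, and the loop
 * stops once @hops reaches the number of NUMA levels (-EINVAL).
 */
static void example_walk_hops(unsigned int node)
{
	const struct cpumask *mask;
	unsigned int hops;

	rcu_read_lock();
	for (hops = 0; ; hops++) {
		mask = sched_numa_hop_mask(node, hops);
		if (IS_ERR(mask))
			break;
		pr_info("node %u, <= %u hops: %*pbl\n",
			node, hops, cpumask_pr_args(mask));
	}
	rcu_read_unlock();
}
```

Note that sched_numa_find_nth_cpu() takes the RCU read lock itself, so only the direct sched_numa_hop_mask() walk needs an explicit rcu_read_lock()/rcu_read_unlock() pair around it.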