Diffstat (limited to 'kernel/sched/topology.c')
-rw-r--r--  kernel/sched/topology.c | 59
1 file changed, 28 insertions, 31 deletions
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 99ea5986038c..329c82faca9b 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -322,7 +322,6 @@ static struct ctl_table sched_energy_aware_sysctls[] = {
 		.extra1         = SYSCTL_ZERO,
 		.extra2         = SYSCTL_ONE,
 	},
-	{}
 };
 
 static int __init sched_energy_aware_sysctl_init(void)
@@ -1330,23 +1329,12 @@ next:
 }
 
 /*
- * Asymmetric CPU capacity bits
- */
-struct asym_cap_data {
-	struct list_head link;
-	unsigned long capacity;
-	unsigned long cpus[];
-};
-
-/*
  * Set of available CPUs grouped by their corresponding capacities
  * Each list entry contains a CPU mask reflecting CPUs that share the same
  * capacity.
  * The lifespan of data is unlimited.
  */
-static LIST_HEAD(asym_cap_list);
-
-#define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus)
+LIST_HEAD(asym_cap_list);
 
 /*
  * Verify whether there is any CPU capacity asymmetry in a given sched domain.
@@ -1386,21 +1374,39 @@ asym_cpu_capacity_classify(const struct cpumask *sd_span,
 }
 
+static void free_asym_cap_entry(struct rcu_head *head)
+{
+	struct asym_cap_data *entry = container_of(head, struct asym_cap_data, rcu);
+	kfree(entry);
+}
+
 static inline void asym_cpu_capacity_update_data(int cpu)
 {
 	unsigned long capacity = arch_scale_cpu_capacity(cpu);
-	struct asym_cap_data *entry = NULL;
+	struct asym_cap_data *insert_entry = NULL;
+	struct asym_cap_data *entry;
 
+	/*
+	 * Search if capacity already exists. If not, track the entry
+	 * where we should insert to keep the list sorted in descending order.
+	 */
 	list_for_each_entry(entry, &asym_cap_list, link) {
 		if (capacity == entry->capacity)
 			goto done;
+		else if (!insert_entry && capacity > entry->capacity)
+			insert_entry = list_prev_entry(entry, link);
 	}
 
 	entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);
 	if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n"))
 		return;
 	entry->capacity = capacity;
-	list_add(&entry->link, &asym_cap_list);
+
+	/* If NULL then the new capacity is the smallest, add last. */
+	if (!insert_entry)
+		list_add_tail_rcu(&entry->link, &asym_cap_list);
+	else
+		list_add_rcu(&entry->link, &insert_entry->link);
 done:
 	__cpumask_set_cpu(cpu, cpu_capacity_span(entry));
 }
 
@@ -1423,8 +1429,8 @@ static void asym_cpu_capacity_scan(void)
 
 	list_for_each_entry_safe(entry, next, &asym_cap_list, link) {
 		if (cpumask_empty(cpu_capacity_span(entry))) {
-			list_del(&entry->link);
-			kfree(entry);
+			list_del_rcu(&entry->link);
+			call_rcu(&entry->rcu, free_asym_cap_entry);
 		}
 	}
 
@@ -1434,8 +1440,8 @@ static void asym_cpu_capacity_scan(void)
 	 */
 	if (list_is_singular(&asym_cap_list)) {
 		entry = list_first_entry(&asym_cap_list, typeof(*entry), link);
-		list_del(&entry->link);
-		kfree(entry);
+		list_del_rcu(&entry->link);
+		call_rcu(&entry->rcu, free_asym_cap_entry);
 	}
 }
 
@@ -1468,7 +1474,7 @@ static void set_domain_attribute(struct sched_domain *sd,
 	} else
 		request = attr->relax_domain_level;
 
-	if (sd->level > request) {
+	if (sd->level >= request) {
 		/* Turn off idle balance on this domain: */
 		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
 	}
@@ -2507,16 +2513,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 	/* Attach the domains */
 	rcu_read_lock();
 	for_each_cpu(i, cpu_map) {
-		unsigned long capacity;
-
 		rq = cpu_rq(i);
 		sd = *per_cpu_ptr(d.sd, i);
 
-		capacity = arch_scale_cpu_capacity(i);
-		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
-		if (capacity > READ_ONCE(d.rd->max_cpu_capacity))
-			WRITE_ONCE(d.rd->max_cpu_capacity, capacity);
-
 		cpu_attach_domain(sd, d.rd, i);
 
 		if (lowest_flag_domain(i, SD_CLUSTER))
@@ -2530,10 +2529,8 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 	if (has_cluster)
 		static_branch_inc_cpuslocked(&sched_cluster_active);
 
-	if (rq && sched_debug_verbose) {
-		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
-			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
-	}
+	if (rq && sched_debug_verbose)
+		pr_info("root domain span: %*pbl\n", cpumask_pr_args(cpu_map));
 
 	ret = 0;
 error:
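As context for the list changes above: because asym_cap_list is now kept sorted in descending capacity order and protected by RCU, a reader can obtain the largest CPU capacity in the system locklessly from the first list entry, which is what makes the removed rd->max_cpu_capacity field redundant. The sketch below is illustrative only, not part of this patch; the helper name is hypothetical and it assumes struct asym_cap_data (with its link, capacity and rcu members) and cpu_capacity_span() are visible from a shared scheduler header.

/* Hypothetical reader-side helper, for illustration only. */
static unsigned long max_cpu_capacity_sketch(void)
{
	struct asym_cap_data *entry;
	unsigned long capacity = SCHED_CAPACITY_SCALE;

	rcu_read_lock();
	/* List is sorted in descending order, so the head holds the maximum. */
	entry = list_first_entry_or_null(&asym_cap_list,
					 struct asym_cap_data, link);
	if (entry)
		capacity = entry->capacity;
	rcu_read_unlock();

	return capacity;
}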
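Similarly, a minimal sketch of how a reader can walk the list safely while asym_cpu_capacity_scan() is unlinking stale entries: traversal uses the _rcu list primitive inside an RCU read-side critical section, and an entry removed with list_del_rcu() remains valid to such readers until free_asym_cap_entry() runs after a grace period. The function below is an assumption for illustration, not code from the patch.

/* Hypothetical helper: count distinct capacity levels visible to readers. */
static unsigned int nr_capacity_levels_sketch(void)
{
	struct asym_cap_data *entry;
	unsigned int levels = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &asym_cap_list, link)
		levels++;
	rcu_read_unlock();

	return levels;
}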