Diffstat (limited to 'kernel/sched/topology.c')
-rw-r--r-- | kernel/sched/topology.c | 35
1 file changed, 25 insertions, 10 deletions
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 4e8698e62f07..d201a7052a29 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -526,7 +526,7 @@ static int init_rootdomain(struct root_domain *rd)
 #ifdef HAVE_RT_PUSH_IPI
 	rd->rto_cpu = -1;
 	raw_spin_lock_init(&rd->rto_lock);
-	init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
+	rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func);
 #endif
 
 	rd->visit_gen = 0;
@@ -688,7 +688,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct sched_domain *tmp;
-	int numa_distance = 0;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
 	for (tmp = sd; tmp; ) {
@@ -716,13 +715,22 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 		tmp = sd;
 		sd = sd->parent;
 		destroy_sched_domain(tmp);
-		if (sd)
+		if (sd) {
+			struct sched_group *sg = sd->groups;
+
+			/*
+			 * sched groups hold the flags of the child sched
+			 * domain for convenience. Clear such flags since
+			 * the child is being destroyed.
+			 */
+			do {
+				sg->flags = 0;
+			} while (sg != sd->groups);
+
 			sd->child = NULL;
+		}
 	}
 
-	for (tmp = sd; tmp; tmp = tmp->parent)
-		numa_distance += !!(tmp->flags & SD_NUMA);
-
 	sched_domain_debug(sd, cpu);
 
 	rq_attach_root(rq, rd);
@@ -916,10 +924,12 @@ build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
 		return NULL;
 
 	sg_span = sched_group_span(sg);
-	if (sd->child)
+	if (sd->child) {
 		cpumask_copy(sg_span, sched_domain_span(sd->child));
-	else
+		sg->flags = sd->child->flags;
+	} else {
 		cpumask_copy(sg_span, sched_domain_span(sd));
+	}
 
 	atomic_inc(&sg->ref);
 	return sg;
@@ -1169,6 +1179,7 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
 	if (child) {
 		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
 		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
+		sg->flags = child->flags;
 	} else {
 		cpumask_set_cpu(cpu, sched_group_span(sg));
 		cpumask_set_cpu(cpu, group_balance_mask(sg));
@@ -1481,7 +1492,6 @@ static int			sched_domains_curr_level;
 int				sched_max_numa_distance;
 static int			*sched_domains_numa_distance;
 static struct cpumask		***sched_domains_numa_masks;
-int __read_mostly		node_reclaim_distance = RECLAIM_DISTANCE;
 
 static unsigned long __read_mostly *sched_numa_onlined_nodes;
 #endif
@@ -1557,7 +1567,7 @@ sd_init(struct sched_domain_topology_level *tl,
 		.last_balance		= jiffies,
 		.balance_interval	= sd_weight,
 		.max_newidle_lb_cost	= 0,
-		.next_decay_max_lb_cost	= jiffies,
+		.last_decay_max_lb_cost	= jiffies,
 		.child			= child,
 #ifdef CONFIG_SCHED_DEBUG
 		.name			= tl->name,
@@ -1627,6 +1637,11 @@ static struct sched_domain_topology_level default_topology[] = {
 #ifdef CONFIG_SCHED_SMT
 	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 #endif
+
+#ifdef CONFIG_SCHED_CLUSTER
+	{ cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) },
+#endif
+
 #ifdef CONFIG_SCHED_MC
 	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 #endif
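
The first hunk swaps init_irq_work() for IRQ_WORK_INIT_HARD() when setting up
rd->rto_push_work. The _HARD initializer tags the work item so that it runs in
hard interrupt context even on PREEMPT_RT, where plain irq_work is otherwise
deferred to a thread; the RT push-IPI callback expects hardirq context. Below
is a minimal user-space sketch of that pattern. The struct, flag, and macros
are simplified stand-ins, not the real <linux/irq_work.h> definitions.

#include <stdio.h>

/* Simplified stand-ins for the kernel's irq_work machinery. */
#define IRQ_WORK_HARD_IRQ	0x01	/* run in hard irq context, even on RT */

struct irq_work {
	unsigned int flags;
	void (*func)(struct irq_work *);
};

/* Designated-initializer form, as used on the diff's right-hand side. */
#define __IRQ_WORK_INIT(_func, _flags) \
	(struct irq_work){ .flags = (_flags), .func = (_func) }
#define IRQ_WORK_INIT_HARD(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_HARD_IRQ)

/* Old-style runtime init: leaves flags at zero, so RT may defer the work. */
static void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
	work->flags = 0;
	work->func = func;
}

static void rto_push_irq_work_func(struct irq_work *work)
{
	(void)work;	/* the real callback pushes RT tasks to other CPUs */
}

int main(void)
{
	struct irq_work old_style, new_style;

	init_irq_work(&old_style, rto_push_irq_work_func);
	new_style = IRQ_WORK_INIT_HARD(rto_push_irq_work_func);

	printf("old flags=%#x, new flags=%#x\n", old_style.flags, new_style.flags);
	return 0;
}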
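
Three of the hunks implement one idea: a sched_group is built from a child
sched_domain, so the group now caches that child's flags (set in
build_group_from_child_sched_domain() and get_group()), and the cache is
cleared in cpu_attach_domain() when a degenerate child is destroyed. The
user-space sketch below models that lifecycle with simplified stand-in types;
the names mirror the kernel's, and for illustration it walks the whole
circular group list when clearing.

#include <stdio.h>

#define SD_SHARE_CPUCAPACITY	0x01	/* stand-in for a child-domain flag */

struct sched_group {
	struct sched_group *next;	/* groups form a circular list */
	int flags;			/* cached flags of the child domain */
};

struct sched_domain {
	struct sched_domain *child;
	struct sched_group *groups;
	int flags;
};

/* Build time: the group inherits the flags of the child domain it spans. */
static void init_group(struct sched_group *sg, struct sched_group *next,
		       const struct sched_domain *child)
{
	sg->next = next;
	sg->flags = child ? child->flags : 0;
}

/* Teardown: the child is gone, so every cached copy of its flags is stale. */
static void clear_group_flags(struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	do {
		sg->flags = 0;
		sg = sg->next;
	} while (sg != sd->groups);
}

int main(void)
{
	struct sched_group g0, g1;
	struct sched_domain child = { .flags = SD_SHARE_CPUCAPACITY };
	struct sched_domain parent = { .child = &child, .groups = &g0 };

	init_group(&g0, &g1, parent.child);
	init_group(&g1, &g0, parent.child);

	/* The child domain degenerates, as in cpu_attach_domain(). */
	parent.child = NULL;
	clear_group_flags(&parent);

	printf("g0.flags=%d, g1.flags=%d\n", g0.flags, g1.flags);
	return 0;
}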
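
The last hunk registers a new CLS (cluster) level between SMT and MC in
default_topology[], for machines where groups of cores share a mid-level
resource such as an L2 cache. The sketch below shows only the ordering
contract of that table: levels are listed from the smallest CPU span to the
largest, and domains are built bottom-up with each level the parent of the
one before it. The types and span descriptions here are simplified stand-ins.

#include <stdio.h>

struct topology_level {
	const char *name;	/* as set by SD_INIT_NAME() */
	const char *span;	/* what the level's CPU mask covers */
};

static const struct topology_level default_topology[] = {
	{ "SMT", "hardware threads of one core" },
	{ "CLS", "cores of one cluster, e.g. sharing an L2 cache" },	/* new */
	{ "MC",  "cores sharing the last-level cache" },
	{ "DIE", "all CPUs in one package" },
};

int main(void)
{
	size_t n = sizeof(default_topology) / sizeof(default_topology[0]);

	/* Bottom-up: each level becomes the parent domain of the previous one. */
	for (size_t i = 0; i < n; i++)
		printf("level %zu: %-3s -> %s\n", i,
		       default_topology[i].name, default_topology[i].span);
	return 0;
}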