| field | value | date |
|---|---|---|
| author | Andreas Herrmann <[email protected]> | 2009-08-18 12:54:06 +0200 |
| committer | Ingo Molnar <[email protected]> | 2009-08-18 18:35:40 +0200 |
| commit | 7f4588f3aa395632fec9ba2e15a1920f0682fda0 | |
| tree | b827a407ef4f509e80aa60bbc0b0eb0dcf8cf402 /kernel/sched.c | |
| parent | 2109b99ee192764b407dc7f52babb74740eea6f9 | |
sched: Separate out build of NUMA sched domain from __build_sched_domains
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <[email protected]>
Cc: Peter Zijlstra <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Diffstat (limited to 'kernel/sched.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/sched.c | 57 |

1 file changed, 32 insertions(+), 25 deletions(-)
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index c5d1fee42360..dd95a4708370 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8482,6 +8482,37 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 	return sa_rootdomain;
 }
 
+static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
+	const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
+{
+	struct sched_domain *sd = NULL;
+#ifdef CONFIG_NUMA
+	struct sched_domain *parent;
+
+	d->sd_allnodes = 0;
+	if (cpumask_weight(cpu_map) >
+	    SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
+		sd = &per_cpu(allnodes_domains, i).sd;
+		SD_INIT(sd, ALLNODES);
+		set_domain_attribute(sd, attr);
+		cpumask_copy(sched_domain_span(sd), cpu_map);
+		cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
+		d->sd_allnodes = 1;
+	}
+	parent = sd;
+
+	sd = &per_cpu(node_domains, i).sd;
+	SD_INIT(sd, NODE);
+	set_domain_attribute(sd, attr);
+	sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
+	sd->parent = parent;
+	if (parent)
+		parent->child = sd;
+	cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
+#endif
+	return sd;
+}
+
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
  * to the individual cpus
@@ -8510,31 +8541,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
 			    cpu_map);
 
-#ifdef CONFIG_NUMA
-		if (cpumask_weight(cpu_map) >
-				SD_NODES_PER_DOMAIN*cpumask_weight(d.nodemask)) {
-			sd = &per_cpu(allnodes_domains, i).sd;
-			SD_INIT(sd, ALLNODES);
-			set_domain_attribute(sd, attr);
-			cpumask_copy(sched_domain_span(sd), cpu_map);
-			cpu_to_allnodes_group(i, cpu_map, &sd->groups,
-					      d.tmpmask);
-			p = sd;
-			d.sd_allnodes = 1;
-		} else
-			p = NULL;
-
-		sd = &per_cpu(node_domains, i).sd;
-		SD_INIT(sd, NODE);
-		set_domain_attribute(sd, attr);
-		sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
-		sd->parent = p;
-		if (p)
-			p->child = sd;
-		cpumask_and(sched_domain_span(sd),
-			    sched_domain_span(sd), cpu_map);
-#endif
-
+		sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
 		p = sd;
 		sd = &per_cpu(phys_domains, i).sd;
 		SD_INIT(sd, CPU);
```
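The refactor rests on one idiom: each newly built domain level is linked beneath whatever level was built before it, a NULL parent marks the top of the hierarchy, and the helper returns the most recently built level so the caller can keep stacking (the physical level follows via `p = sd`). Below is a minimal standalone sketch of that chaining idiom; `struct domain`, `link_below`, and the level names are simplified stand-ins invented for illustration, not the kernel's `struct sched_domain` API.

```c
#include <stdio.h>

/* Simplified stand-in for struct sched_domain: linkage fields only. */
struct domain {
	const char *name;
	struct domain *parent;	/* next-wider level, NULL at the top */
	struct domain *child;	/* next-narrower level, NULL at the bottom */
};

/*
 * Link sd one level below parent. A NULL parent (ALLNODES skipped, or
 * no NUMA levels at all) simply makes sd the topmost level -- the same
 * role the old 'else p = NULL' branch played.
 */
static struct domain *link_below(struct domain *sd, struct domain *parent)
{
	sd->parent = parent;
	if (parent)
		parent->child = sd;
	return sd;	/* hand back the new top so the caller keeps stacking */
}

int main(void)
{
	struct domain allnodes = { "ALLNODES", NULL, NULL };
	struct domain node = { "NODE", NULL, NULL };
	struct domain cpu = { "CPU", NULL, NULL };
	struct domain *sd = NULL, *d;

	/* Mirrors the refactored flow: NUMA levels first, phys level last. */
	sd = link_below(&allnodes, sd);	/* optional widest level */
	sd = link_below(&node, sd);	/* per-node level */
	sd = link_below(&cpu, sd);	/* like 'p = sd; sd = ...phys_domains...' */

	/* Walk from the narrowest level up to the widest: CPU, NODE, ALLNODES. */
	for (d = sd; d; d = d->parent)
		printf("%s\n", d->name);
	return 0;
}
```

Two details worth noting about the extracted helper itself: it initializes `sd = NULL` and clears `d->sd_allnodes` up front, so with `!CONFIG_NUMA` (or when the ALLNODES level is skipped) the caller's `p = sd` cleanly yields a NULL parent for the physical level; and since `SD_NODES_PER_DOMAIN` was 16 in kernels of this vintage, the extra ALLNODES level is only built when `cpu_map` spans more CPUs than 16 nodes' worth.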