diff options
Diffstat (limited to 'arch/arm/kernel/topology.c')
| -rw-r--r-- | arch/arm/kernel/topology.c | 60 | 
1 file changed, 6 insertions, 54 deletions
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index d17cb1e6d679..5b9faba03afb 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -177,17 +177,6 @@ static inline void parse_dt_topology(void) {}
 static inline void update_cpu_capacity(unsigned int cpuid) {}
 #endif
 
-/*
- * cpu topology table
- */
-struct cputopo_arm cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
-
-const struct cpumask *cpu_coregroup_mask(int cpu)
-{
-	return &cpu_topology[cpu].core_sibling;
-}
-
 /*
  * The current assumption is that we can power gate each core independently.
  * This will be superseded by DT binding once available.
@@ -197,32 +186,6 @@ const struct cpumask *cpu_corepower_mask(int cpu)
 	return &cpu_topology[cpu].thread_sibling;
 }
 
-static void update_siblings_masks(unsigned int cpuid)
-{
-	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
-	int cpu;
-
-	/* update core and thread sibling masks */
-	for_each_possible_cpu(cpu) {
-		cpu_topo = &cpu_topology[cpu];
-
-		if (cpuid_topo->socket_id != cpu_topo->socket_id)
-			continue;
-
-		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
-		if (cpu != cpuid)
-			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
-
-		if (cpuid_topo->core_id != cpu_topo->core_id)
-			continue;
-
-		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
-		if (cpu != cpuid)
-			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
-	}
-	smp_wmb();
-}
-
 /*
  * store_cpu_topology is called at boot when only one cpu is running
  * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
@@ -230,7 +193,7 @@ static void update_siblings_masks(unsigned int cpuid)
  */
 void store_cpu_topology(unsigned int cpuid)
 {
-	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
+	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
 	unsigned int mpidr;
 
 	/* If the cpu topology has been already set, just return */
@@ -250,12 +213,12 @@ void store_cpu_topology(unsigned int cpuid)
 			/* core performance interdependency */
 			cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+			cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
 		} else {
 			/* largely independent cores */
 			cpuid_topo->thread_id = -1;
 			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+			cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 		}
 	} else {
 		/*
@@ -265,7 +228,7 @@ void store_cpu_topology(unsigned int cpuid)
 		 */
 		cpuid_topo->thread_id = -1;
 		cpuid_topo->core_id = 0;
-		cpuid_topo->socket_id = -1;
+		cpuid_topo->package_id = -1;
 	}
 
 	update_siblings_masks(cpuid);
@@ -275,7 +238,7 @@ void store_cpu_topology(unsigned int cpuid)
 	pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
 		cpuid, cpu_topology[cpuid].thread_id,
 		cpu_topology[cpuid].core_id,
-		cpu_topology[cpuid].socket_id, mpidr);
+		cpu_topology[cpuid].package_id, mpidr);
 }
 
 static inline int cpu_corepower_flags(void)
@@ -298,18 +261,7 @@ static struct sched_domain_topology_level arm_topology[] = {
  */
 void __init init_cpu_topology(void)
 {
-	unsigned int cpu;
-
-	/* init core mask and capacity */
-	for_each_possible_cpu(cpu) {
-		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
-
-		cpu_topo->thread_id = -1;
-		cpu_topo->core_id =  -1;
-		cpu_topo->socket_id = -1;
-		cpumask_clear(&cpu_topo->core_sibling);
-		cpumask_clear(&cpu_topo->thread_sibling);
-	}
+	reset_cpu_topology();
 	smp_wmb();
 	parse_dt_topology();