Diffstat (limited to 'arch/arm/kernel/smp.c')
-rw-r--r--  arch/arm/kernel/smp.c  53
1 file changed, 18 insertions, 35 deletions
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 84f4cbf652e5..31644f1978d5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -125,18 +125,6 @@ void __init smp_init_cpus(void)
                 smp_ops.smp_init_cpus();
 }
 
-static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
-{
-        if (smp_ops.smp_prepare_cpus)
-                smp_ops.smp_prepare_cpus(max_cpus);
-}
-
-static void __cpuinit platform_secondary_init(unsigned int cpu)
-{
-        if (smp_ops.smp_secondary_init)
-                smp_ops.smp_secondary_init(cpu);
-}
-
 int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
         if (smp_ops.smp_boot_secondary)
@@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu)
         return 1;
 }
 
-static void platform_cpu_die(unsigned int cpu)
-{
-        if (smp_ops.cpu_die)
-                smp_ops.cpu_die(cpu);
-}
-
 static int platform_cpu_disable(unsigned int cpu)
 {
         if (smp_ops.cpu_disable)
@@ -257,7 +239,8 @@ void __ref cpu_die(void)
          * actual CPU shutdown procedure is at least platform (if not
          * CPU) specific.
          */
-        platform_cpu_die(cpu);
+        if (smp_ops.cpu_die)
+                smp_ops.cpu_die(cpu);
 
         /*
          * Do not return to the idle loop - jump back to the secondary
@@ -302,6 +285,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
          * switch away from it before attempting any exclusive accesses.
          */
         cpu_switch_mm(mm->pgd, mm);
+        local_flush_bp_all();
         enter_lazy_tlb(mm, current);
         local_flush_tlb_all();
 
@@ -324,7 +308,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
         /*
          * Give the platform a chance to do its own initialisation.
          */
-        platform_secondary_init(cpu);
+        if (smp_ops.smp_secondary_init)
+                smp_ops.smp_secondary_init(cpu);
 
         notify_cpu_starting(cpu);
 
@@ -399,8 +384,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                 /*
                  * Initialise the present map, which describes the set of CPUs
                  * actually populated at the present time. A platform should
-                 * re-initialize the map in platform_smp_prepare_cpus() if
-                 * present != possible (e.g. physical hotplug).
+                 * re-initialize the map in the platforms smp_prepare_cpus()
+                 * if present != possible (e.g. physical hotplug).
                  */
                 init_cpu_present(cpu_possible_mask);
 
@@ -408,7 +393,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                  * Initialise the SCU if there are more than one CPU
                  * and let them know where to start.
                  */
-                platform_smp_prepare_cpus(max_cpus);
+                if (smp_ops.smp_prepare_cpus)
+                        smp_ops.smp_prepare_cpus(max_cpus);
         }
 }
 
@@ -416,7 +402,8 @@ static void (*smp_cross_call)(const struct cpumask *, unsigned int);
 
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
-        smp_cross_call = fn;
+        if (!smp_cross_call)
+                smp_cross_call = fn;
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -475,19 +462,11 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
  */
 static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
 
-static void ipi_timer(void)
-{
-        struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
-        evt->event_handler(evt);
-}
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-static void smp_timer_broadcast(const struct cpumask *mask)
+void tick_broadcast(const struct cpumask *mask)
 {
         smp_cross_call(mask, IPI_TIMER);
 }
-#else
-#define smp_timer_broadcast     NULL
 #endif
 
 static void broadcast_timer_set_mode(enum clock_event_mode mode,
@@ -530,7 +509,6 @@ static void __cpuinit percpu_timer_setup(void)
         struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
 
         evt->cpumask = cpumask_of(cpu);
-        evt->broadcast = smp_timer_broadcast;
 
         if (!lt_ops || lt_ops->setup(evt))
                 broadcast_timer_setup(evt);
@@ -596,11 +574,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
         case IPI_WAKEUP:
                 break;
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
         case IPI_TIMER:
                 irq_enter();
-                ipi_timer();
+                tick_receive_broadcast();
                 irq_exit();
                 break;
+#endif
 
         case IPI_RESCHEDULE:
                 scheduler_ipi();
@@ -693,6 +673,9 @@ static int cpufreq_callback(struct notifier_block *nb,
         if (freq->flags & CPUFREQ_CONST_LOOPS)
                 return NOTIFY_OK;
 
+        if (arm_delay_ops.const_clock)
+                return NOTIFY_OK;
+
         if (!per_cpu(l_p_j_ref, cpu)) {
                 per_cpu(l_p_j_ref, cpu) =
                         per_cpu(cpu_data, cpu).loops_per_jiffy;
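
Apart from the clockevents conversion, the recurring change in the hunks above is mechanical: the one-line platform_*() wrappers are deleted and the NULL checks on the smp_ops hooks are performed at the call sites instead. What follows is a minimal, self-contained userspace sketch of that optional-hook pattern, not kernel code; the demo_* names are invented for illustration.

#include <stdio.h>

/* Illustrative only: the demo_* identifiers below are not kernel API.
 * The struct mirrors the shape of struct smp_operations: a platform
 * fills in only the callbacks it needs and leaves the rest NULL. */
struct demo_smp_operations {
        void (*smp_prepare_cpus)(unsigned int max_cpus); /* may be NULL */
        void (*smp_secondary_init)(unsigned int cpu);    /* may be NULL */
        void (*cpu_die)(unsigned int cpu);               /* may be NULL */
};

static void demo_prepare(unsigned int max_cpus)
{
        printf("platform-specific prepare for %u CPUs\n", max_cpus);
}

/* Only one hook provided; the others stay NULL. */
static struct demo_smp_operations demo_smp_ops = {
        .smp_prepare_cpus = demo_prepare,
};

int main(void)
{
        /* NULL-checked calls at the call site, as the patched smp.c now
         * does, rather than through one-line platform_*() wrappers. */
        if (demo_smp_ops.smp_prepare_cpus)
                demo_smp_ops.smp_prepare_cpus(4);

        if (demo_smp_ops.smp_secondary_init)    /* NULL, so skipped */
                demo_smp_ops.smp_secondary_init(1);

        return 0;
}

The behaviour is identical to that of the deleted wrappers, since the NULL test is exactly what they performed; the patch only removes a layer of indirection.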
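
The set_smp_cross_call() hunk changes the registration semantics slightly: the IPI trigger pointer now latches on first assignment, so a later caller can no longer replace it. Below is a hedged userspace sketch of that write-once registration, again with invented demo_* names rather than the kernel's actual types.

#include <stdio.h>

/* Illustrative only: demo_* identifiers are not kernel API. */
static void (*demo_cross_call)(unsigned int);

/* First registration wins; later calls are ignored, matching the
 * patched set_smp_cross_call(). */
static void demo_set_cross_call(void (*fn)(unsigned int))
{
        if (!demo_cross_call)
                demo_cross_call = fn;
}

static void first_trigger(unsigned int ipi)
{
        printf("raise IPI %u via first_trigger\n", ipi);
}

static void late_trigger(unsigned int ipi)
{
        printf("never reached (IPI %u)\n", ipi);
}

int main(void)
{
        demo_set_cross_call(first_trigger);
        demo_set_cross_call(late_trigger);      /* ignored: already set */
        demo_cross_call(2);                     /* prints via first_trigger */
        return 0;
}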