| field | value | date |
|---|---|---|
| author | Mark Brown <[email protected]> | 2015-10-12 18:09:27 +0100 |
| committer | Mark Brown <[email protected]> | 2015-10-12 18:09:27 +0100 |
| commit | 79828b4fa835f73cdaf4bffa48696abdcbea9d02 (patch) | |
| tree | 5e0fa7156acb75ba603022bc807df8f2fedb97a8 /kernel/cpu.c | |
| parent | 721b51fcf91898299d96f4b72cb9434cda29dce6 (diff) | |
| parent | 8c1a9d6323abf0fb1e5dad96cf3f1c783505ea5a (diff) | |
Merge remote-tracking branch 'asoc/fix/rt5645' into asoc-fix-rt5645
Diffstat (limited to 'kernel/cpu.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/cpu.c | 61 |

1 file changed, 29 insertions(+), 32 deletions(-)
```diff
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6a374544d495..82cf9dff4295 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -191,21 +191,22 @@ void cpu_hotplug_done(void)
 void cpu_hotplug_disable(void)
 {
 	cpu_maps_update_begin();
-	cpu_hotplug_disabled = 1;
+	cpu_hotplug_disabled++;
 	cpu_maps_update_done();
 }
+EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
 
 void cpu_hotplug_enable(void)
 {
 	cpu_maps_update_begin();
-	cpu_hotplug_disabled = 0;
+	WARN_ON(--cpu_hotplug_disabled < 0);
 	cpu_maps_update_done();
 }
-
+EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 #endif	/* CONFIG_HOTPLUG_CPU */
 
 /* Need to know about CPUs going up/down? */
-int __ref register_cpu_notifier(struct notifier_block *nb)
+int register_cpu_notifier(struct notifier_block *nb)
 {
 	int ret;
 	cpu_maps_update_begin();
@@ -214,7 +215,7 @@ int __ref register_cpu_notifier(struct notifier_block *nb)
 	return ret;
 }
 
-int __ref __register_cpu_notifier(struct notifier_block *nb)
+int __register_cpu_notifier(struct notifier_block *nb)
 {
 	return raw_notifier_chain_register(&cpu_chain, nb);
 }
@@ -244,7 +245,7 @@ static void cpu_notify_nofail(unsigned long val, void *v)
 EXPORT_SYMBOL(register_cpu_notifier);
 EXPORT_SYMBOL(__register_cpu_notifier);
 
-void __ref unregister_cpu_notifier(struct notifier_block *nb)
+void unregister_cpu_notifier(struct notifier_block *nb)
 {
 	cpu_maps_update_begin();
 	raw_notifier_chain_unregister(&cpu_chain, nb);
@@ -252,7 +253,7 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
-void __ref __unregister_cpu_notifier(struct notifier_block *nb)
+void __unregister_cpu_notifier(struct notifier_block *nb)
 {
 	raw_notifier_chain_unregister(&cpu_chain, nb);
 }
@@ -329,7 +330,7 @@ struct take_cpu_down_param {
 };
 
 /* Take this CPU down. */
-static int __ref take_cpu_down(void *_param)
+static int take_cpu_down(void *_param)
 {
 	struct take_cpu_down_param *param = _param;
 	int err;
@@ -348,7 +349,7 @@ static int __ref take_cpu_down(void *_param)
 }
 
 /* Requires cpu_add_remove_lock to be held */
-static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+static int _cpu_down(unsigned int cpu, int tasks_frozen)
 {
 	int err, nr_calls = 0;
 	void *hcpu = (void *)(long)cpu;
@@ -381,14 +382,14 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	 * will observe it.
 	 *
 	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
-	 * not imply sync_sched(), so explicitly call both.
+	 * not imply sync_sched(), so wait for both.
 	 *
 	 * Do sync before park smpboot threads to take care the rcu boost case.
 	 */
-#ifdef CONFIG_PREEMPT
-	synchronize_sched();
-#endif
-	synchronize_rcu();
+	if (IS_ENABLED(CONFIG_PREEMPT))
+		synchronize_rcu_mult(call_rcu, call_rcu_sched);
+	else
+		synchronize_rcu();
 
 	smpboot_park_threads(cpu);
 
@@ -401,7 +402,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	/*
 	 * So now all preempt/rcu users must observe !cpu_active().
 	 */
-	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+	err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
@@ -442,7 +443,7 @@ out_release:
 	return err;
 }
 
-int __ref cpu_down(unsigned int cpu)
+int cpu_down(unsigned int cpu)
 {
 	int err;
 
@@ -527,18 +528,9 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out_notify;
 	}
 
-	/*
-	 * Some architectures have to walk the irq descriptors to
-	 * setup the vector space for the cpu which comes online.
-	 * Prevent irq alloc/free across the bringup.
-	 */
-	irq_lock_sparse();
-
 	/* Arch-specific enabling code. */
 	ret = __cpu_up(cpu, idle);
 
-	irq_unlock_sparse();
-
 	if (ret != 0)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
@@ -617,13 +609,18 @@ int disable_nonboot_cpus(void)
 		}
 	}
 
-	if (!error) {
+	if (!error)
 		BUG_ON(num_online_cpus() > 1);
-		/* Make sure the CPUs won't be enabled by someone else */
-		cpu_hotplug_disabled = 1;
-	} else {
+	else
 		pr_err("Non-boot CPUs are not disabled\n");
-	}
+
+	/*
+	 * Make sure the CPUs won't be enabled by someone else. We need to do
+	 * this even in case of failure as all disable_nonboot_cpus() users are
+	 * supposed to do enable_nonboot_cpus() on the failure path.
+	 */
+	cpu_hotplug_disabled++;
+
 	cpu_maps_update_done();
 	return error;
 }
@@ -636,13 +633,13 @@ void __weak arch_enable_nonboot_cpus_end(void)
 {
 }
 
-void __ref enable_nonboot_cpus(void)
+void enable_nonboot_cpus(void)
 {
 	int cpu, error;
 
 	/* Allow everyone to use the CPU hotplug again */
 	cpu_maps_update_begin();
-	cpu_hotplug_disabled = 0;
+	WARN_ON(--cpu_hotplug_disabled < 0);
 	if (cpumask_empty(frozen_cpus))
 		goto out;
 
```
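The central change above converts `cpu_hotplug_disabled` from a boolean flag into a reference count: `cpu_hotplug_disable()` increments it, `cpu_hotplug_enable()` decrements it under a `WARN_ON()` against underflow, and both helpers are now exported GPL-only. That makes disable/enable pairs safely nestable across independent callers. A minimal sketch of why the counter matters, using hypothetical callers that are not part of this patch:

```c
/*
 * Hypothetical callers (not from this patch) showing how the
 * refcounted API composes where the old boolean could not.
 */
#include <linux/cpu.h>

static void user_a_start(void)
{
	cpu_hotplug_disable();	/* count 0 -> 1: hotplug blocked */
	/* ... touch per-CPU state that must not see CPUs vanish ... */
}

static void user_b(void)
{
	cpu_hotplug_disable();	/* count 1 -> 2 */
	/* ... */
	cpu_hotplug_enable();	/* count 2 -> 1: hotplug stays blocked */
}

static void user_a_end(void)
{
	cpu_hotplug_enable();	/* count 1 -> 0: hotplug allowed again */
}
```

With the old flag, `user_b()`'s enable would have re-armed hotplug while `user_a` still depended on it being off; the `WARN_ON(--cpu_hotplug_disabled < 0)` also catches unbalanced enable calls.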
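In `_cpu_down()`, the `#ifdef CONFIG_PREEMPT` block becomes an `IS_ENABLED()` branch, and the two sequential grace-period waits are replaced by `synchronize_rcu_mult(call_rcu, call_rcu_sched)`, which waits for the normal and sched grace periods concurrently instead of back to back. A reduced sketch of the `IS_ENABLED()` idiom itself, with hypothetical helper functions:

```c
#include <linux/kconfig.h>	/* IS_ENABLED() */

/* Hypothetical stand-ins for the two synchronization strategies. */
static void preempt_sync(void)    { /* ... */ }
static void nonpreempt_sync(void) { /* ... */ }

static void do_sync(void)
{
	/*
	 * Unlike #ifdef, both branches below are parsed and type-checked
	 * in every configuration; IS_ENABLED() folds to a constant 0 or 1,
	 * so the compiler discards the dead branch at no runtime cost.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		preempt_sync();
	else
		nonpreempt_sync();
}
```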
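`disable_nonboot_cpus()` now bumps `cpu_hotplug_disabled` even when taking the CPUs down fails; as the new comment in the hunk states, this only stays balanced because every caller is expected to run `enable_nonboot_cpus()` on its failure path too. A hypothetical caller sketch (not from this patch) of that contract:

```c
#include <linux/cpu.h>

/* Sketch of the caller contract assumed by disable_nonboot_cpus(). */
static int do_pm_transition(void)
{
	int error = disable_nonboot_cpus();

	if (error)
		goto out;	/* CPUs still online, but the count was bumped */

	/* ... work that requires only the boot CPU to be online ... */

out:
	enable_nonboot_cpus();	/* drops the count on success and failure */
	return error;
}
```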