Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 33 ++++++++++++---------------------
 1 file changed, 12 insertions(+), 21 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8fdb710bfdd7..43d18cb46308 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -38,7 +38,6 @@
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
 #include <linux/freezer.h>
-#include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
@@ -48,6 +47,7 @@
 #include <linux/nodemask.h>
 #include <linux/moduleparam.h>
 #include <linux/uaccess.h>
+#include <linux/sched/isolation.h>
 
 #include "workqueue_internal.h"
 
@@ -1634,7 +1634,7 @@ static void worker_enter_idle(struct worker *worker)
 		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
 	/*
-	 * Sanity check nr_running.  Because wq_unbind_fn() releases
+	 * Sanity check nr_running.  Because unbind_workers() releases
 	 * pool->lock between setting %WORKER_UNBOUND and zapping
 	 * nr_running, the warning may trigger spuriously.  Check iff
 	 * unbind is not in progress.
@@ -4510,9 +4510,8 @@ void show_workqueue_state(void)
  * cpu comes back online.
  */
 
-static void wq_unbind_fn(struct work_struct *work)
+static void unbind_workers(int cpu)
 {
-	int cpu = smp_processor_id();
 	struct worker_pool *pool;
 	struct worker *worker;
 
@@ -4589,16 +4588,6 @@ static void rebind_workers(struct worker_pool *pool)
 
 	spin_lock_irq(&pool->lock);
 
-	/*
-	 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
-	 * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
-	 * being reworked and this can go away in time.
-	 */
-	if (!(pool->flags & POOL_DISASSOCIATED)) {
-		spin_unlock_irq(&pool->lock);
-		return;
-	}
-
 	pool->flags &= ~POOL_DISASSOCIATED;
 
 	for_each_pool_worker(worker, pool) {
@@ -4709,12 +4698,13 @@ int workqueue_online_cpu(unsigned int cpu)
 
 int workqueue_offline_cpu(unsigned int cpu)
 {
-	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
 	/* unbinding per-cpu workers should happen on the local CPU */
-	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+	if (WARN_ON(cpu != smp_processor_id()))
+		return -1;
+
+	unbind_workers(cpu);
 
 	/* update NUMA affinity of unbound workqueues */
 	mutex_lock(&wq_pool_mutex);
@@ -4722,9 +4712,6 @@ int workqueue_offline_cpu(unsigned int cpu)
 		wq_update_unbound_numa(wq, cpu, false);
 	mutex_unlock(&wq_pool_mutex);
 
-	/* wait for per-cpu unbinding to finish */
-	flush_work(&unbind_work);
-	destroy_work_on_stack(&unbind_work);
 	return 0;
 }
 
@@ -4957,6 +4944,10 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 	if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
 		return -ENOMEM;
 
+	/*
+	 * Not excluding isolated cpus on purpose.
+	 * If the user wishes to include them, we allow that.
+	 */
 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
 	if (!cpumask_empty(cpumask)) {
 		apply_wqattrs_lock();
@@ -5555,7 +5546,7 @@ int __init workqueue_init_early(void)
 	WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
 	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
-	cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
+	cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN));
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);