Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	175
1 file changed, 108 insertions, 67 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 33a6b4a2443d..613917bbc4e7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -375,6 +375,7 @@ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 static int worker_thread(void *__worker);
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 static void show_pwq(struct pool_workqueue *pwq);
+static void show_one_worker_pool(struct worker_pool *pool);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -1350,7 +1351,7 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
 	struct worker_pool *pool = pwq->pool;
 
 	/* record the work call stack in order to print it in KASAN reports */
-	kasan_record_aux_stack(work);
+	kasan_record_aux_stack_noalloc(work);
 
 	/* we own @work, set data and link */
 	set_work_pwq(work, pwq, extra_flags);
@@ -4447,7 +4448,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 			raw_spin_unlock_irq(&pwq->pool->lock);
 			mutex_unlock(&wq->mutex);
 			mutex_unlock(&wq_pool_mutex);
-			show_workqueue_state();
+			show_one_workqueue(wq);
 			return;
 		}
 		raw_spin_unlock_irq(&pwq->pool->lock);
@@ -4797,83 +4798,116 @@ static void show_pwq(struct pool_workqueue *pwq)
 }
 
 /**
- * show_workqueue_state - dump workqueue state
- *
- * Called from a sysrq handler or try_to_freeze_tasks() and prints out
- * all busy workqueues and pools.
+ * show_one_workqueue - dump state of specified workqueue
+ * @wq: workqueue whose state will be printed
  */
-void show_workqueue_state(void)
+void show_one_workqueue(struct workqueue_struct *wq)
 {
-	struct workqueue_struct *wq;
-	struct worker_pool *pool;
+	struct pool_workqueue *pwq;
+	bool idle = true;
 	unsigned long flags;
-	int pi;
-
-	rcu_read_lock();
-
-	pr_info("Showing busy workqueues and worker pools:\n");
-
-	list_for_each_entry_rcu(wq, &workqueues, list) {
-		struct pool_workqueue *pwq;
-		bool idle = true;
 
-		for_each_pwq(pwq, wq) {
-			if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
-				idle = false;
-				break;
-			}
+	for_each_pwq(pwq, wq) {
+		if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+			idle = false;
+			break;
 		}
-		if (idle)
-			continue;
+	}
+	if (idle) /* Nothing to print for idle workqueue */
+		return;
 
-		pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
+	pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
 
-		for_each_pwq(pwq, wq) {
-			raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-			if (pwq->nr_active || !list_empty(&pwq->inactive_works))
-				show_pwq(pwq);
-			raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+	for_each_pwq(pwq, wq) {
+		raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+		if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
 			/*
-			 * We could be printing a lot from atomic context, e.g.
-			 * sysrq-t -> show_workqueue_state(). Avoid triggering
-			 * hard lockup.
+			 * Defer printing to avoid deadlocks in console
+			 * drivers that queue work while holding locks
+			 * also taken in their write paths.
 			 */
-			touch_nmi_watchdog();
-		}
-	}
-
-	for_each_pool(pool, pi) {
-		struct worker *worker;
-		bool first = true;
-
-		raw_spin_lock_irqsave(&pool->lock, flags);
-		if (pool->nr_workers == pool->nr_idle)
-			goto next_pool;
-
-		pr_info("pool %d:", pool->id);
-		pr_cont_pool_info(pool);
-		pr_cont(" hung=%us workers=%d",
-			jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
-			pool->nr_workers);
-		if (pool->manager)
-			pr_cont(" manager: %d",
-				task_pid_nr(pool->manager->task));
-		list_for_each_entry(worker, &pool->idle_list, entry) {
-			pr_cont(" %s%d", first ? "idle: " : "",
-				task_pid_nr(worker->task));
-			first = false;
+			printk_deferred_enter();
+			show_pwq(pwq);
+			printk_deferred_exit();
 		}
-		pr_cont("\n");
-	next_pool:
-		raw_spin_unlock_irqrestore(&pool->lock, flags);
+		raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
 		/*
 		 * We could be printing a lot from atomic context, e.g.
-		 * sysrq-t -> show_workqueue_state(). Avoid triggering
+		 * sysrq-t -> show_all_workqueues(). Avoid triggering
 		 * hard lockup.
 		 */
 		touch_nmi_watchdog();
 	}
+}
+
+/**
+ * show_one_worker_pool - dump state of specified worker pool
+ * @pool: worker pool whose state will be printed
+ */
+static void show_one_worker_pool(struct worker_pool *pool)
+{
+	struct worker *worker;
+	bool first = true;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pool->lock, flags);
+	if (pool->nr_workers == pool->nr_idle)
+		goto next_pool;
+	/*
+	 * Defer printing to avoid deadlocks in console drivers that
+	 * queue work while holding locks also taken in their write
+	 * paths.
+	 */
+	printk_deferred_enter();
+	pr_info("pool %d:", pool->id);
+	pr_cont_pool_info(pool);
+	pr_cont(" hung=%us workers=%d",
+		jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
+		pool->nr_workers);
+	if (pool->manager)
+		pr_cont(" manager: %d",
+			task_pid_nr(pool->manager->task));
+	list_for_each_entry(worker, &pool->idle_list, entry) {
+		pr_cont(" %s%d", first ? "idle: " : "",
+			task_pid_nr(worker->task));
+		first = false;
+	}
+	pr_cont("\n");
+	printk_deferred_exit();
+next_pool:
+	raw_spin_unlock_irqrestore(&pool->lock, flags);
+	/*
+	 * We could be printing a lot from atomic context, e.g.
+	 * sysrq-t -> show_all_workqueues(). Avoid triggering
+	 * hard lockup.
+	 */
+	touch_nmi_watchdog();
+
+}
+
+/**
+ * show_all_workqueues - dump workqueue state
+ *
+ * Called from a sysrq handler or try_to_freeze_tasks() and prints out
+ * all busy workqueues and pools.
+ */
+void show_all_workqueues(void)
+{
+	struct workqueue_struct *wq;
+	struct worker_pool *pool;
+	int pi;
+
+	rcu_read_lock();
+
+	pr_info("Showing busy workqueues and worker pools:\n");
+
+	list_for_each_entry_rcu(wq, &workqueues, list)
+		show_one_workqueue(wq);
+
+	for_each_pool(pool, pi)
+		show_one_worker_pool(pool);
+
 	rcu_read_unlock();
 }
@@ -5370,9 +5404,6 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 	int ret = -EINVAL;
 	cpumask_var_t saved_cpumask;
 
-	if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
-		return -ENOMEM;
-
 	/*
 	 * Not excluding isolated cpus on purpose.
 	 * If the user wishes to include them, we allow that.
@@ -5380,6 +5411,15 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
 	if (!cpumask_empty(cpumask)) {
 		apply_wqattrs_lock();
+		if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
+			ret = 0;
+			goto out_unlock;
+		}
+
+		if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
 
 		/* save the old wq_unbound_cpumask. */
 		cpumask_copy(saved_cpumask, wq_unbound_cpumask);
@@ -5392,10 +5432,11 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 		if (ret < 0)
 			cpumask_copy(wq_unbound_cpumask, saved_cpumask);
 
+		free_cpumask_var(saved_cpumask);
+out_unlock:
 		apply_wqattrs_unlock();
 	}
 
-	free_cpumask_var(saved_cpumask);
 	return ret;
 }
@@ -5855,7 +5896,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 	rcu_read_unlock();
 
 	if (lockup_detected)
-		show_workqueue_state();
+		show_all_workqueues();
 
 	wq_watchdog_reset_touched();
 	mod_timer(&wq_watchdog_timer, jiffies + thresh);
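The notable pattern in the dump rework above is that show_pwq() and the worker-pool summary are now emitted between printk_deferred_enter() and printk_deferred_exit() while the pool lock is held. A console driver may queue work from its write path and therefore need that same pool lock, so driving the console synchronously from inside the dump could deadlock; deferring the output until after the lock is dropped avoids that. A minimal sketch of the same pattern follows. It is illustrative only: struct foo_pool and dump_foo_pool() are invented names, while printk_deferred_enter()/printk_deferred_exit(), the raw spinlock API and touch_nmi_watchdog() are the real kernel interfaces the patch relies on.

#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/nmi.h>

/* Hypothetical state object; stands in for a worker_pool/pool_workqueue. */
struct foo_pool {
	raw_spinlock_t lock;
	int id;
	int pending;
};

/*
 * Dump state protected by a lock that a console driver's write path
 * might also need (for example because it queues work). While in
 * deferred mode, printk() only records the message; the console is
 * driven later, after the lock is released, so the dump cannot
 * deadlock against the console driver.
 */
static void dump_foo_pool(struct foo_pool *pool)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pool->lock, flags);

	printk_deferred_enter();
	pr_info("foo_pool %d: pending=%d\n", pool->id, pool->pending);
	printk_deferred_exit();

	raw_spin_unlock_irqrestore(&pool->lock, flags);

	/* Dumps may run from atomic context (e.g. sysrq); avoid a hard lockup. */
	touch_nmi_watchdog();
}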