| author | Bartlomiej Zolnierkiewicz <[email protected]> | 2019-02-07 16:44:43 +0100 |
|---|---|---|
| committer | Bartlomiej Zolnierkiewicz <[email protected]> | 2019-02-07 16:44:43 +0100 |
| commit | 82ffd0454bd9bd57780966d47bfd56d579dd4fb3 (patch) | |
| tree | a735cfea934b7c4eac4a2c228cd95620c1daf969 /kernel/workqueue.c | |
| parent | 890d14d2d4b57ff5a149309da3ed36c8a529987f (diff) | |
| parent | 8834f5600cf3c8db365e18a3d5cac2c2780c81e5 (diff) | |
Merge tag 'v5.0-rc5' of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into fbdev-for-next
Linux 5.0-rc5
Sync with upstream (which now contains fbdev-v5.0-rc3 changes) to
prepare a base for fbdev-v5.1 changes.
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 31 |
|---|---|---|

1 file changed, 27 insertions, 4 deletions
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0280deac392e..fc5d23d752a5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -910,6 +910,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
 }
 
 /**
+ * wq_worker_last_func - retrieve worker's last work function
+ *
+ * Determine the last function a worker executed. This is called from
+ * the scheduler to get a worker's last known identity.
+ *
+ * CONTEXT:
+ * spin_lock_irq(rq->lock)
+ *
+ * Return:
+ * The last work function %current executed as a worker, NULL if it
+ * hasn't executed any work yet.
+ */
+work_func_t wq_worker_last_func(struct task_struct *task)
+{
+	struct worker *worker = kthread_data(task);
+
+	return worker->last_func;
+}
+
+/**
  * worker_set_flags - set worker flags and adjust nr_running accordingly
  * @worker: self
  * @flags: flags to set
@@ -2184,6 +2204,9 @@ __acquires(&pool->lock)
 	if (unlikely(cpu_intensive))
 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
 
+	/* tag the worker for identification in schedule() */
+	worker->last_func = worker->current_func;
+
 	/* we're done with it, release */
 	hash_del(&worker->hentry);
 	worker->current_work = NULL;
@@ -3396,7 +3419,7 @@ static void put_unbound_pool(struct worker_pool *pool)
 	del_timer_sync(&pool->mayday_timer);
 
 	/* sched-RCU protected to allow dereferences from get_work_pool() */
-	call_rcu_sched(&pool->rcu, rcu_free_pool);
+	call_rcu(&pool->rcu, rcu_free_pool);
 }
 
 /**
@@ -3503,14 +3526,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 	put_unbound_pool(pool);
 	mutex_unlock(&wq_pool_mutex);
 
-	call_rcu_sched(&pwq->rcu, rcu_free_pwq);
+	call_rcu(&pwq->rcu, rcu_free_pwq);
 
 	/*
 	 * If we're the last pwq going away, @wq is already dead and no one
 	 * is gonna access it anymore. Schedule RCU free.
 	 */
 	if (is_last)
-		call_rcu_sched(&wq->rcu, rcu_free_wq);
+		call_rcu(&wq->rcu, rcu_free_wq);
 }
 
 /**
@@ -4195,7 +4218,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 		 * The base ref is never dropped on per-cpu pwqs. Directly
		 * schedule RCU free.
 		 */
-		call_rcu_sched(&wq->rcu, rcu_free_wq);
+		call_rcu(&wq->rcu, rcu_free_wq);
 	} else {
 		/*
 		 * We're the sole accessor of @wq at this point. Directly
```
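The first two hunks work as a pair: process_one_work() now saves worker->current_func into the new last_func field right before current_work is cleared, and wq_worker_last_func() lets scheduler-side code read that value under rq->lock even after the worker has gone idle. The point of keeping last_func rather than consulting current_work is that current_work is NULL once the item finishes, so only the saved function pointer can still identify the worker at sleep time. Below is a minimal sketch of what a consumer of the hook could look like; avoid_wakeup_pingpong() and my_deferred_work() are hypothetical names used only for illustration, and the include for the hook's declaration is assumed (in mainline it sits with the other scheduler hooks in kernel/workqueue_internal.h).

```c
#include <linux/sched.h>
#include <linux/workqueue.h>
#include "workqueue_internal.h"	/* declares wq_worker_last_func(); path assumed */

/* Hypothetical work handler that gets re-armed whenever a task changes state. */
static void my_deferred_work(struct work_struct *work);

/*
 * Hypothetical check, called with rq->lock held while @task goes to sleep:
 * if the sleeping task is a workqueue worker whose last job was our own
 * deferred work item, skip re-arming the work, so the worker's own sleep
 * cannot keep re-triggering the work that keeps waking the worker.
 */
static bool avoid_wakeup_pingpong(struct task_struct *task)
{
	return (task->flags & PF_WQ_WORKER) &&
	       wq_worker_last_func(task) == my_deferred_work;
}
```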
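The remaining hunks are mechanical: the v5.0 RCU flavor consolidation folded sched-RCU into regular RCU, so call_rcu_sched() call sites become plain call_rcu() with the same guarantees, which is why the "sched-RCU protected" comment can stay unchanged. For reference, here is a sketch of the deferred-free pattern all three call sites rely on; struct my_obj and its helpers are hypothetical stand-ins for worker_pool, pool_workqueue and workqueue_struct.

```c
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object released with the same embedded-rcu_head pattern. */
struct my_obj {
	int payload;
	struct rcu_head rcu;	/* like worker_pool->rcu, pwq->rcu, wq->rcu */
};

/* Runs after a grace period: no reader can still be dereferencing the object. */
static void my_obj_free(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct my_obj, rcu));
}

static void my_obj_release(struct my_obj *obj)
{
	/*
	 * Readers that found @obj under rcu_read_lock() keep a valid pointer
	 * until they unlock; the kfree() is deferred past the grace period.
	 */
	call_rcu(&obj->rcu, my_obj_free);
}
```

Because the rcu_head is embedded in the object itself, release needs no allocation, and the callback recovers the enclosing object with container_of(); that is exactly how rcu_free_pool(), rcu_free_pwq() and rcu_free_wq() work in this file.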