author     Linus Torvalds <torvalds@linux-foundation.org>   2021-04-05 09:35:58 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-04-05 09:35:58 -0700
commit     0a50438c84363bd37fe18fe432888ae9a074dcab (patch)
tree       1e3f95e5709d1ba184f80fb91e71f3d39fd2b0a9
parent     e49d033bddf5b565044e2abe4241353959bc9120 (diff)
parent     89e28ce60cb65971c73359c66d076aa20a395cd5 (diff)
Merge branch 'for-5.12-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue fixes from Tejun Heo:
"Two workqueue fixes.
One is around debugobj and poses no risk. The other prevents the stall
watchdog from firing spuriously in certain conditions. Not as trivial as
the debugobj change, but still fairly low risk"
* 'for-5.12-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
workqueue/watchdog: Make unbound workqueues aware of touch_softlockup_watchdog()
workqueue: Move the position of debug_work_activate() in __queue_work()
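
For context on the first fix: the watchdog timer compares each pool's last-activity
timestamp against a "touched" timestamp and reports a stall only when both are older
than the threshold. Below is a minimal userspace model of that comparison as it works
after the fix; it is an illustrative sketch, not kernel code, and the helper name
pool_stalled, the parameter layout and the tick values are assumptions made for the
example.

/*
 * Minimal userspace model of the timestamp selection done by the fixed
 * wq_watchdog_timer_fn() (illustrative sketch, not kernel code).
 * time_after() mirrors the kernel macro's wrap-safe comparison.
 */
#include <stdio.h>
#include <stdbool.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)

/*
 * A per-CPU pool (cpu >= 0) is checked against its CPU's touch timestamp,
 * an unbound pool against the global one; the pool counts as stalled only
 * if neither its own activity nor the relevant touch is recent enough.
 */
static bool pool_stalled(unsigned long now, unsigned long thresh, int cpu,
			 unsigned long pool_ts,
			 unsigned long cpu_touched, unsigned long global_touched)
{
	unsigned long touched = cpu >= 0 ? cpu_touched : global_touched;
	unsigned long ts = time_after(pool_ts, touched) ? pool_ts : touched;

	return time_after(now, ts + thresh);
}

int main(void)
{
	/* Example tick values (assumed): pool idle since tick 1000. */
	unsigned long now = 40000, thresh = 30000;

	/* Per-CPU pool checked against a recent per-CPU touch: no stall. */
	printf("per-CPU pool, recent CPU touch  : stalled=%d\n",
	       pool_stalled(now, thresh, 0, 1000, 39000, 1000));

	/* Unbound pool checked against a stale global touch: stall reported. */
	printf("unbound pool, stale global touch: stalled=%d\n",
	       pool_stalled(now, thresh, -1, 1000, 39000, 1000));

	return 0;
}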
-rw-r--r--  kernel/watchdog.c  |  5 +++--
-rw-r--r--  kernel/workqueue.c | 19 +++++++------------
2 files changed, 10 insertions, 14 deletions
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 71109065bd8e..107bc38b1945 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -278,9 +278,10 @@ void touch_all_softlockup_watchdogs(void)
 	 * update as well, the only side effect might be a cycle delay for
 	 * the softlockup check.
 	 */
-	for_each_cpu(cpu, &watchdog_allowed_mask)
+	for_each_cpu(cpu, &watchdog_allowed_mask) {
 		per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
-	wq_watchdog_touch(-1);
+		wq_watchdog_touch(cpu);
+	}
 }
 
 void touch_softlockup_watchdog_sync(void)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0d150da252e8..79f2319543ce 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1412,7 +1412,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	 */
 	lockdep_assert_irqs_disabled();
 
-	debug_work_activate(work);
 
 	/* if draining, only works from the same workqueue are allowed */
 	if (unlikely(wq->flags & __WQ_DRAINING) &&
@@ -1494,6 +1493,7 @@ retry:
 		worklist = &pwq->delayed_works;
 	}
 
+	debug_work_activate(work);
 	insert_work(pwq, work, worklist, work_flags);
 
 out:
@@ -5787,22 +5787,17 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 			continue;
 
 		/* get the latest of pool and touched timestamps */
+		if (pool->cpu >= 0)
+			touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
+		else
+			touched = READ_ONCE(wq_watchdog_touched);
 		pool_ts = READ_ONCE(pool->watchdog_ts);
-		touched = READ_ONCE(wq_watchdog_touched);
 
 		if (time_after(pool_ts, touched))
 			ts = pool_ts;
 		else
 			ts = touched;
 
-		if (pool->cpu >= 0) {
-			unsigned long cpu_touched =
-				READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
-						  pool->cpu));
-			if (time_after(cpu_touched, ts))
-				ts = cpu_touched;
-		}
-
 		/* did we stall? */
 		if (time_after(jiffies, ts + thresh)) {
 			lockup_detected = true;
@@ -5826,8 +5821,8 @@ notrace void wq_watchdog_touch(int cpu)
 {
 	if (cpu >= 0)
 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
-	else
-		wq_watchdog_touched = jiffies;
+
+	wq_watchdog_touched = jiffies;
 }
 
 static void wq_watchdog_set_thresh(unsigned long thresh)
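
As a companion to the diff above, here is a hedged sketch of the touch side after the
change: wq_watchdog_touch() now refreshes the global timestamp even when called for a
specific CPU, and touch_all_softlockup_watchdogs() touches every allowed CPU instead of
passing -1. The model below only imitates that behaviour; NCPUS, the model_* names and
the tick value are assumptions of this sketch, not kernel identifiers.

/*
 * Userspace model of the touch-side behaviour after the fix
 * (illustrative only; not the kernel functions themselves).
 */
#include <stdio.h>

#define NCPUS 4

static unsigned long wq_touched_global;
static unsigned long wq_touched_cpu[NCPUS];

static void model_wq_watchdog_touch(int cpu, unsigned long now)
{
	if (cpu >= 0)
		wq_touched_cpu[cpu] = now;

	/* Unconditional update: unbound pools see CPU-local touches too. */
	wq_touched_global = now;
}

static void model_touch_all(unsigned long now)
{
	/* Mirrors the fixed touch_all_softlockup_watchdogs() loop, which
	 * now calls wq_watchdog_touch(cpu) for each allowed CPU. */
	for (int cpu = 0; cpu < NCPUS; cpu++)
		model_wq_watchdog_touch(cpu, now);
}

int main(void)
{
	model_touch_all(12345);
	printf("global=%lu cpu0=%lu\n", wq_touched_global, wq_touched_cpu[0]);
	return 0;
}

The unconditional update of the global timestamp appears to be what keeps unbound
pools, which only consult the global timestamp in the fixed timer function, from
reporting spurious stalls when only per-CPU touches occur.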