author		Kees Cook <[email protected]>		2021-09-29 15:02:14 -0700
committer	Peter Zijlstra <[email protected]>	2021-10-15 11:25:14 +0200
commit		42a20f86dc19f9282d974df0ba4d226c865ab9dd (patch)
tree		548e09e31785a53f7907e0e4c28471d0e53fcbb5 /kernel/sched/core.c
parent		bc9bbb81730ea667c31c5b284f95ee312bab466f (diff)
sched: Add wrapper for get_wchan() to keep task blocked
Having a stable wchan means the process must be blocked, and must stay
blocked while the stack unwinding is performed.
Suggested-by: Peter Zijlstra <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Geert Uytterhoeven <[email protected]>
Acked-by: Russell King (Oracle) <[email protected]> [arm]
Tested-by: Mark Rutland <[email protected]> [arm64]
Link: https://lkml.kernel.org/r/[email protected]
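
With this change the blocked-state checks live in one place: the generic
get_wchan() wrapper (added below) takes p->pi_lock and verifies the task is
blocked, and each architecture implements only __get_wchan(). As a rough
illustration of what the arch side may now assume, here is a minimal sketch;
thread_saved_fp() and arch_unwind_next() are hypothetical stand-ins for the
architecture's real unwinder, not part of this patch:

/*
 * Hypothetical arch-side __get_wchan(). The generic get_wchan()
 * wrapper holds p->pi_lock and has confirmed @p is blocked, so the
 * task's saved stack frames cannot change while we walk them.
 */
unsigned long __get_wchan(struct task_struct *p)
{
	unsigned long fp = thread_saved_fp(p);	/* assumed arch helper */
	unsigned long pc;

	/* Walk saved frames until we leave scheduler internals. */
	while (fp && !arch_unwind_next(p, &fp, &pc)) {	/* hypothetical */
		if (!in_sched_functions(pc))
			return pc;	/* first non-scheduler frame: the wchan */
	}
	return 0;
}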
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	19
1 file changed, 19 insertions, 0 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 935c2da00339..f2611b9cf503 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1966,6 +1966,25 @@ bool sched_task_on_rq(struct task_struct *p)
 	return task_on_rq_queued(p);
 }
 
+unsigned long get_wchan(struct task_struct *p)
+{
+	unsigned long ip = 0;
+	unsigned int state;
+
+	if (!p || p == current)
+		return 0;
+
+	/* Only get wchan if task is blocked and we can keep it that way. */
+	raw_spin_lock_irq(&p->pi_lock);
+	state = READ_ONCE(p->__state);
+	smp_rmb(); /* see try_to_wake_up() */
+	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
+		ip = __get_wchan(p);
+	raw_spin_unlock_irq(&p->pi_lock);
+
+	return ip;
+}
+
 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (!(flags & ENQUEUE_NOCLOCK))
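
For context, the main consumer of get_wchan() is the /proc/<pid>/wchan file.
A simplified sketch of such a reader, modelled on fs/proc (details elided;
treat it as an approximation, not the exact in-tree code):

/* Simplified /proc/<pid>/wchan reader: resolve the wchan address to a symbol. */
static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
			  struct pid *pid, struct task_struct *task)
{
	unsigned long wchan;
	char symname[KSYM_NAME_LEN];

	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		goto print0;

	wchan = get_wchan(task);
	if (wchan && !lookup_symbol_name(wchan, symname)) {
		seq_puts(m, symname);
		return 0;
	}

print0:
	seq_putc(m, '0');
	return 0;
}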