| author | Arnd Bergmann <[email protected]> | 2012-03-24 19:13:59 +0000 |
|---|---|---|
| committer | Arnd Bergmann <[email protected]> | 2012-03-24 19:13:59 +0000 |
| commit | 83fe628e16d84efc8df2731bc403eae4e4f53801 (patch) | |
| tree | 9a51c292235621d0f4f632c2a55ddb5a6ab582af /kernel/hung_task.c | |
| parent | 853a0231e057c04255a848f6998f84faaa635c58 (diff) | |
| parent | 426f1af947c61dee48a9267f84bff227e503a547 (diff) | |
Merge branch 'renesas/soc' into next/soc2
Diffstat (limited to 'kernel/hung_task.c')
| -rw-r--r-- | kernel/hung_task.c | 11 |
1 file changed, 7 insertions, 4 deletions
```diff
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 2e48ec0c2e91..c21449f85a2a 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -119,15 +119,20 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
  * For preemptible RCU it is sufficient to call rcu_read_unlock in order
  * to exit the grace period. For classic RCU, a reschedule is required.
  */
-static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
+static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
 {
+	bool can_cont;
+
 	get_task_struct(g);
 	get_task_struct(t);
 	rcu_read_unlock();
 	cond_resched();
 	rcu_read_lock();
+	can_cont = pid_alive(g) && pid_alive(t);
 	put_task_struct(t);
 	put_task_struct(g);
+
+	return can_cont;
 }
 
 /*
@@ -154,9 +159,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 			goto unlock;
 		if (!--batch_count) {
 			batch_count = HUNG_TASK_BATCHING;
-			rcu_lock_break(g, t);
-			/* Exit if t or g was unhashed during refresh. */
-			if (t->state == TASK_DEAD || g->state == TASK_DEAD)
+			if (!rcu_lock_break(g, t))
 				goto unlock;
 		}
 		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
```
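For readers who prefer to see the result rather than apply the hunks mentally, below is a sketch of how rcu_lock_break() reads in kernel/hung_task.c once this change is applied. The function body is taken from the hunks above; the #include lines and the comments are added annotation, and the file only builds in-tree as part of the kernel, so treat this as an illustration rather than a copy of the final file.

```c
/*
 * Sketch of rcu_lock_break() after this patch (kernel code, builds in-tree).
 * The body matches the diff above; comments are added annotation.
 */
#include <linux/sched.h>	/* struct task_struct, get/put_task_struct, pid_alive, cond_resched */
#include <linux/rcupdate.h>	/* rcu_read_lock, rcu_read_unlock */

static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
{
	bool can_cont;

	/* Pin both tasks so their task_structs survive the unlocked window. */
	get_task_struct(g);
	get_task_struct(t);

	/* Leave the RCU read-side section and reschedule, so the grace
	 * period can make progress even with classic (non-preemptible) RCU. */
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();

	/* Back under rcu_read_lock(): only allow the caller to continue
	 * if both tasks are still hashed (pid_alive()), i.e. still linked
	 * into the lists the caller is iterating. */
	can_cont = pid_alive(g) && pid_alive(t);

	put_task_struct(t);
	put_task_struct(g);

	return can_cont;
}
```

The visible effect of the change is that the liveness decision moves into rcu_lock_break() itself: instead of the caller testing t->state == TASK_DEAD after the lock break, the function reports, while it still holds references to g and t, whether both tasks pass pid_alive() and are therefore still usable as cursors for continuing the scan in check_hung_uninterruptible_tasks(). The caller in the second hunk then simply bails out to unlock on a false return.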