author     Thomas Gleixner <[email protected]>   2011-06-22 19:47:01 +0200
committer  Ingo Molnar <[email protected]>       2011-08-29 12:26:59 +0200
commit     9c40cef2b799f9b5e7fa5de4d2ad3a0168ba118c (patch)
tree       cb7a48eb2a6d30b90144a55bc6bb82caacfc9622
parent     c259e01a1ec90063042f758e409cd26b2a0963c8 (diff)
sched: Move blk_schedule_flush_plug() out of __schedule()
There is no real reason to run blk_schedule_flush_plug() with interrupts
and preemption disabled. Move it into schedule() and call it when the
task is going voluntarily to sleep. There might be false positives when
the task is woken between that call and actually scheduling, but that's
not really different from being woken immediately after switching away.

This fixes a deadlock in the scheduler where the
blk_schedule_flush_plug() callchain enables interrupts and thereby
allows a wakeup to happen of the task that's going to sleep.

Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Jens Axboe <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: [email protected] # 2.6.39+
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
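To illustrate the deadlock being fixed: before this patch the plug was flushed
from inside __schedule(), in the code removed by the first hunk below. A
simplified, annotated view of that old call site (the comments are added here
for explanation and are not part of the patch):

	/* Inside __schedule(): rq->lock is held and preemption is disabled. */
	if (blk_needs_flush_plug(prev)) {
		raw_spin_unlock(&rq->lock);
		/*
		 * The blk_schedule_flush_plug() callchain can re-enable
		 * interrupts here, so a wakeup of prev can arrive while
		 * __schedule() is still in the middle of putting prev to
		 * sleep -- the deadlock described above.
		 */
		blk_schedule_flush_plug(prev);
		raw_spin_lock(&rq->lock);
	}

After the patch the flush happens in schedule() itself, before __schedule() is
entered, so it runs with interrupts and preemption still enabled and any wakeup
it triggers is at worst a false positive.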
-rw-r--r--  kernel/sched.c  25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ec15e8129cf7..511732c39b6e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4322,16 +4322,6 @@ need_resched:
 				if (to_wakeup)
 					try_to_wake_up_local(to_wakeup);
 			}
-
-			/*
-			 * If we are going to sleep and we have plugged IO
-			 * queued, make sure to submit it to avoid deadlocks.
-			 */
-			if (blk_needs_flush_plug(prev)) {
-				raw_spin_unlock(&rq->lock);
-				blk_schedule_flush_plug(prev);
-				raw_spin_lock(&rq->lock);
-			}
 		}
 		switch_count = &prev->nvcsw;
 	}
@@ -4370,8 +4360,23 @@ need_resched:
 		goto need_resched;
 }
 
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+	if (!tsk->state)
+		return;
+	/*
+	 * If we are going to sleep and we have plugged IO queued,
+	 * make sure to submit it to avoid deadlocks.
+	 */
+	if (blk_needs_flush_plug(tsk))
+		blk_schedule_flush_plug(tsk);
+}
+
 asmlinkage void schedule(void)
 {
+	struct task_struct *tsk = current;
+
+	sched_submit_work(tsk);
 	__schedule();
 }
 EXPORT_SYMBOL(schedule);
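Put together, the scheduler entry path with this patch applied reads roughly as
follows. This is a sketch reconstructed from the hunks above; the __sched
annotation on schedule() is assumed from the surrounding kernel source rather
than shown in the hunk context, and the comments are added for explanation:

static inline void sched_submit_work(struct task_struct *tsk)
{
	/* A running task (state == 0) is not going to sleep; nothing to flush. */
	if (!tsk->state)
		return;
	/*
	 * If we are going to sleep and we have plugged IO queued,
	 * make sure to submit it to avoid deadlocks.
	 */
	if (blk_needs_flush_plug(tsk))
		blk_schedule_flush_plug(tsk);
}

asmlinkage void __sched schedule(void)
{
	struct task_struct *tsk = current;

	/* Flush plugged IO before __schedule() disables preemption and takes rq->lock. */
	sched_submit_work(tsk);
	__schedule();
}
EXPORT_SYMBOL(schedule);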