author	Matthew Wilcox <[email protected]>	2018-08-21 21:56:30 -0700
committer	Linus Torvalds <[email protected]>	2018-08-22 10:52:47 -0700
commit	c430d1e848ff1240d126e79780f3c26208b8aed9 (patch)
tree	aa2c5cd32056e24e075310d950f4385203c7cc87
parent	ee8ef0a4b167c1f2605bc9b5701c603224673d53 (diff)
userfaultfd: use fault_wqh lock
The userfaultfd code currently uses the unlocked waitqueue helpers for
managing fault_wqh, but instead of holding the waitqueue lock for this
waitqueue around these calls, it takes the waitqueue lock of
fault_pending_wqh, which is a different waitqueue instance.  Given that
the waitqueue is not exposed to the rest of the kernel this actually
works OK at the moment, but it prevents the userfaultfd locking rules
from being enforced using lockdep.  Switch to the internally locked
waitqueue helpers instead.  This means that the lock inside fault_wqh
now nests inside the fault_pending_wqh lock, but that's not a problem
since it was entirely unused before.

[[email protected]: slight changelog updates]
[[email protected]: spotted changelog spellos]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Mike Rapoport <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jason Baron <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
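For illustration, here is a minimal sketch of the wake-up pattern the
patch establishes, distilled from the hunks below rather than copied
from the kernel: wake_in_range() is a hypothetical helper name, while
the ctx fields, struct userfaultfd_wake_range, and the waitqueue
helpers are the real ones from fs/userfaultfd.c (struct userfaultfd_ctx
is local to that file, so the sketch assumes it lives there too).

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Hypothetical helper mirroring the patched wake-up path. */
static void wake_in_range(struct userfaultfd_ctx *ctx,
			  struct userfaultfd_wake_range *range)
{
	spin_lock(&ctx->fault_pending_wqh.lock);
	/*
	 * fault_pending_wqh.lock is held here, so the "locked"
	 * helper is the right one for fault_pending_wqh itself.
	 */
	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, range);
	/*
	 * fault_wqh is a separate waitqueue: wake it with the
	 * internally locked __wake_up(), which takes and releases
	 * fault_wqh.lock itself.  That lock now nests inside
	 * fault_pending_wqh.lock, an ordering lockdep can verify.
	 */
	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
	spin_unlock(&ctx->fault_pending_wqh.lock);
}

The old __wake_up_locked_key(&ctx->fault_wqh, ...) calls never took
fault_wqh.lock at all, so lockdep never saw that lock in use; the
per-call lock/unlock that __wake_up() adds is cheap precisely because
fault_wqh.lock was entirely unused before.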
-rw-r--r--	fs/userfaultfd.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 15c265d450bf..f649023b19b5 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -910,7 +910,7 @@ wakeup:
 	 */
 	spin_lock(&ctx->fault_pending_wqh.lock);
 	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
-	__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
+	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
 	spin_unlock(&ctx->fault_pending_wqh.lock);
 
 	/* Flush pending events that may still wait on event_wqh */
@@ -1066,7 +1066,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
 			 * anyway.
 			 */
 			list_del(&uwq->wq.entry);
-			__add_wait_queue(&ctx->fault_wqh, &uwq->wq);
+			add_wait_queue(&ctx->fault_wqh, &uwq->wq);
 
 			write_seqcount_end(&ctx->refile_seq);
@@ -1215,7 +1215,7 @@ static void __wake_userfault(struct userfaultfd_ctx *ctx,
 		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
 				     range);
 	if (waitqueue_active(&ctx->fault_wqh))
-		__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range);
+		__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
 	spin_unlock(&ctx->fault_pending_wqh.lock);
 }