author     Waiman Long <[email protected]>	2016-05-17 21:26:20 -0400
committer  Ingo Molnar <[email protected]>	2016-06-08 15:16:59 +0200
commit     fb6a44f33be542fd81575ff93a4e8118d6a58592
tree       6750c84b596d15ac1e9c30b1a57de0cd31efdedd
parent     19c5d690e41697fcdd19379ab9d10d8d37818414
locking/rwsem: Protect all writes to owner by WRITE_ONCE()
Without using WRITE_ONCE(), the compiler can potentially break a write
into multiple smaller ones (store tearing). A concurrent read of the
same data by another task may then return a partial result. This can
cause a kernel crash if the data is a memory address that is being
dereferenced.

This patch changes all writes to rwsem->owner to use WRITE_ONCE() to
make sure that store tearing cannot happen. READ_ONCE() may not be
needed for rwsem->owner as long as the value is only used for
comparison and is not dereferenced.

Signed-off-by: Waiman Long <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Dave Chinner <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: Douglas Hatch <[email protected]>
Cc: Jason Low <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Hurley <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Scott J Norton <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
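For readers who have not used it, the user-space sketch below illustrates the idea behind the change. It assumes a simplified WRITE_ONCE() modeled on the kernel's volatile-store approach; fake_rwsem, fake_task and fake_set_owner are hypothetical names used only for illustration, not kernel code.

#include <stdio.h>

/* Store through a volatile lvalue so the compiler emits one full-width write. */
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

struct fake_task {
	int pid;
};

struct fake_rwsem {
	struct fake_task *owner;	/* read concurrently by optimistic spinners */
};

static void fake_set_owner(struct fake_rwsem *sem, struct fake_task *t)
{
	/* A plain "sem->owner = t" could legally be torn into smaller stores. */
	WRITE_ONCE(sem->owner, t);
}

int main(void)
{
	struct fake_task t = { .pid = 42 };
	struct fake_rwsem sem = { .owner = NULL };

	fake_set_owner(&sem, &t);
	printf("owner pid: %d\n", sem.owner->pid);
	return 0;
}

The kernel's real WRITE_ONCE() in <linux/compiler.h> carries additional machinery, but the volatile access is the core mechanism that prevents the compiler from splitting the store.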
 kernel/locking/rwsem.h | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
index 8f43ba234787..a699f4048ba1 100644
--- a/kernel/locking/rwsem.h
+++ b/kernel/locking/rwsem.h
@@ -16,14 +16,21 @@
#define RWSEM_READER_OWNED ((struct task_struct *)1UL)
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+/*
+ * All writes to owner are protected by WRITE_ONCE() to make sure that
+ * store tearing can't happen as optimistic spinners may read and use
+ * the owner value concurrently without lock. Read from owner, however,
+ * may not need READ_ONCE() as long as the pointer value is only used
+ * for comparison and isn't being dereferenced.
+ */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
- sem->owner = current;
+ WRITE_ONCE(sem->owner, current);
}
static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
- sem->owner = NULL;
+ WRITE_ONCE(sem->owner, NULL);
}
static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
@@ -34,7 +41,7 @@ static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
* to minimize cacheline contention.
*/
if (sem->owner != RWSEM_READER_OWNED)
- sem->owner = RWSEM_READER_OWNED;
+ WRITE_ONCE(sem->owner, RWSEM_READER_OWNED);
}
static inline bool rwsem_owner_is_writer(struct task_struct *owner)