author	Peter Zijlstra (Intel) <[email protected]>	2015-04-24 14:56:36 -0400
committer	Ingo Molnar <[email protected]>	2015-05-08 12:36:58 +0200
commit	2aa79af64263190eec610422b07f60e99a7d230a (patch)
tree	423641cd1ae46f066739d62d743f39c7eb58c5c8
parent	2c83e8e9492dc823be1d96d4c5ef75d16d3866a0 (diff)
locking/qspinlock: Revert to test-and-set on hypervisors
When we detect a hypervisor (!paravirt, see qspinlock paravirt support
patches), revert to a simple test-and-set lock to avoid the horrors of
queue preemption.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Waiman Long <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Boris Ostrovsky <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Daniel J Blueman <[email protected]>
Cc: David Vrabel <[email protected]>
Cc: Douglas Hatch <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Konrad Rzeszutek Wilk <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Raghavendra K T <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Scott J Norton <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
-rw-r--r--	arch/x86/include/asm/qspinlock.h	14
-rw-r--r--	include/asm-generic/qspinlock.h		7
-rw-r--r--	kernel/locking/qspinlock.c		3
3 files changed, 24 insertions, 0 deletions
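
For context, the test-and-set scheme the commit message refers to can be sketched in a few lines of user-space C. This is an illustrative analogue, not the kernel code: the struct, function names and C11 atomics below are assumptions; the kernel version operates on lock->val with atomic_cmpxchg() and cpu_relax(), as the x86 hunk below shows.

/*
 * Illustrative user-space sketch of a test-and-set spinlock, the
 * scheme the patch reverts to under a hypervisor.  Not kernel code:
 * C11 atomics stand in for atomic_cmpxchg(), and the bare busy-wait
 * loop stands in for cpu_relax().
 */
#include <stdatomic.h>
#include <stdbool.h>

struct tas_lock {
	atomic_int val;			/* 0 = unlocked, 1 = locked */
};

static inline void tas_lock(struct tas_lock *lock)
{
	int expected = 0;

	/* Spin until we are the one to flip val from 0 to 1. */
	while (!atomic_compare_exchange_weak(&lock->val, &expected, 1))
		expected = 0;		/* CAS wrote the observed value back */
}

static inline void tas_unlock(struct tas_lock *lock)
{
	atomic_store_explicit(&lock->val, 0, memory_order_release);
}
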
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index e2aee8273664..f079b7020e3f 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -1,6 +1,7 @@
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H
+#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#define queued_spin_unlock queued_spin_unlock
@@ -15,6 +16,19 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
smp_store_release((u8 *)lock, 0);
}
+#define virt_queued_spin_lock virt_queued_spin_lock
+
+static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+		return false;
+
+	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
+		cpu_relax();
+
+	return true;
+}
+
#include <asm-generic/qspinlock.h>
#endif /* _ASM_X86_QSPINLOCK_H */
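
The gate here is static_cpu_has(X86_FEATURE_HYPERVISOR), which reflects the CPUID "running under a hypervisor" flag (leaf 1, ECX bit 31) that virtual machine monitors set. As a rough user-space analogue (the helper name below is invented for illustration, not part of the patch), the same bit can be read via the compiler's cpuid.h:

#include <cpuid.h>
#include <stdbool.h>

/* Rough user-space analogue of static_cpu_has(X86_FEATURE_HYPERVISOR):
 * CPUID leaf 1 reports the hypervisor-present flag in ECX bit 31. */
static bool running_on_hypervisor(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return false;

	return (ecx >> 31) & 1;
}
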
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 569abcd47a9a..83bfb87f5bf1 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,6 +111,13 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
cpu_relax();
}
+#ifndef virt_queued_spin_lock
+static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+	return false;
+}
+#endif
+
/*
* Initializier
*/
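
The #ifndef block above is the usual weak-default idiom: the arch header defines both the function and a same-named macro before including asm-generic/qspinlock.h, so the generic stub is only compiled in when no arch override exists. A minimal standalone illustration of the pattern (the names here are invented, not from the patch):

/* arch header, included first, provides an override and announces it: */
#define arch_hint arch_hint
static inline bool arch_hint(void) { return true; }

/* generic header, included afterwards, supplies a stub only if the
 * arch did not define the macro above: */
#ifndef arch_hint
static inline bool arch_hint(void) { return false; }
#endif
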
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 033872113ebb..fd31a474145d 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -249,6 +249,9 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
+	if (virt_queued_spin_lock(lock))
+		return;
+
/*
* wait for in-progress pending->locked hand-overs
*
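
Putting the three hunks together, the slowpath now bails out before any queueing when the test-and-set path already took the lock. A paraphrased sketch of the resulting entry (only the two added lines are literal; the rest summarizes the unchanged code):

void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	/* On a hypervisor, virt_queued_spin_lock() spins with a plain
	 * test-and-set until it owns the lock and returns true, so the
	 * pending/MCS queueing below never runs. */
	if (virt_queued_spin_lock(lock))
		return;

	/* On bare metal it returns false and the original queued
	 * slowpath (pending bit, MCS queue) proceeds unchanged. */
	/* ... */
}
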