author     Will Deacon <[email protected]>       2021-08-09 16:24:28 +0100
committer  Marc Zyngier <[email protected]>      2021-08-11 11:39:35 +0100
commit     d21292f13f1f0721d60e8122e2db46bea8cf6950
tree       c3e61017f835146991d86b43e78ecd3988da0f7e
parent     63db506e07622c344a3c748a1c06293d48780f83

KVM: arm64: Add hyp_spin_is_locked() for basic locking assertions at EL2
Introduce hyp_spin_is_locked() so that functions can easily assert that
a given lock is held (albeit possibly by another CPU!) without having to
drag full lockdep support up to EL2.
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Quentin Perret <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
-rw-r--r--   arch/arm64/kvm/hyp/include/nvhe/spinlock.h | 8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/hyp/include/nvhe/spinlock.h b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h
index 76b537f8d1c6..04f65b655fcf 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/spinlock.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h
@@ -15,6 +15,7 @@
 
 #include <asm/alternative.h>
 #include <asm/lse.h>
+#include <asm/rwonce.h>
 
 typedef union hyp_spinlock {
 	u32	__val;
@@ -89,4 +90,11 @@ static inline void hyp_spin_unlock(hyp_spinlock_t *lock)
 	: "memory");
 }
 
+static inline bool hyp_spin_is_locked(hyp_spinlock_t *lock)
+{
+	hyp_spinlock_t lockval = READ_ONCE(*lock);
+
+	return lockval.owner != lockval.next;
+}
+
 #endif /* __ARM64_KVM_NVHE_SPINLOCK_H__ */
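For illustration, the sketch below shows the kind of assertion this helper is meant to enable at EL2. The wrapper name and the BUG_ON-based policy are assumptions made for this example, not part of the patch; it only demonstrates how a caller might check that a lock is held before touching the data it protects, assuming BUG_ON is usable in the nVHE hyp context:

#include <nvhe/spinlock.h>

/*
 * Hypothetical assertion helper (not part of this patch): callers that
 * rely on a hyp_spinlock_t being held can sanity-check it cheaply.
 */
static inline void assert_hyp_lock_held(hyp_spinlock_t *lock)
{
	/*
	 * hyp_spin_is_locked() only says that *some* CPU holds the lock,
	 * possibly a different one, so this is weaker than lockdep's
	 * ownership tracking but needs no extra state at EL2.
	 */
	BUG_ON(!hyp_spin_is_locked(lock));
}

The check itself relies on the ticket-lock layout already defined in this header: next advances when a ticket is taken and owner advances on unlock, so the two halves are equal only when the lock is free, and READ_ONCE() on the union samples both u16 fields as a single consistent 32-bit value.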