author | Guo Ren <guoren@linux.alibaba.com> | 2022-07-23 21:32:34 -0400
committer | Guo Ren <guoren@linux.alibaba.com> | 2022-07-31 05:24:42 -0400
commit | 45e15c1a375ea380d55880be2f8182cb737b60ed (patch)
tree | d877ed3a2314bfe6ebfb2a6aa57ea9ef0ec4ec91 /arch/csky/include/asm/cmpxchg.h
parent | 4e8bb4ba5a558159ffbfa7e60322a1c151c3903c (diff)
csky: Add qspinlock support
Enable qspinlock per the requirements mentioned in commit a8ad07e5240c9
("asm-generic: qspinlock: Indicate the use of mixed-size atomics").

C-SKY only has "ldex/stex" for all atomic operations, so csky gives a
strong forward-progress guarantee for "ldex/stex": once ldex has grabbed
the cache line into L1, it blocks other cores from snooping that address
for several cycles. atomic_fetch_add and the xchg16 added below therefore
have the same forward-guarantee level on C-SKY.
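For illustration, here is a minimal userspace C11 sketch of the same
technique the patch implements in assembly: emulate a 16-bit exchange with
a 32-bit atomic loop on the containing aligned word. The function name
xchg16_sketch is hypothetical, and the C11 compare-exchange loop is a
portable stand-in for the native ldex/stex LL/SC sequence:

#include <stdatomic.h>
#include <stdint.h>

/* Sketch only: splice a new 16-bit value into the aligned 32-bit word
 * that contains it, mirroring the shift/mask logic of the patch.
 * C11 compare-exchange stands in for ldex/stex here. */
static uint16_t xchg16_sketch(uint16_t *ptr, uint16_t newval)
{
	uint32_t shift = ((uintptr_t)ptr & 2) ? 16 : 0;   /* which halfword */
	uint32_t mask  = 0xffffu << shift;
	_Atomic uint32_t *word =
		(_Atomic uint32_t *)((uintptr_t)ptr & ~(uintptr_t)2);
	uint32_t old = atomic_load_explicit(word, memory_order_relaxed);
	uint32_t val;

	do {
		/* Keep the other halfword, insert the new 16 bits. */
		val = (old & ~mask) | ((uint32_t)newval << shift);
	} while (!atomic_compare_exchange_weak_explicit(word, &old, val,
			memory_order_relaxed, memory_order_relaxed));

	return (uint16_t)((old & mask) >> shift);
}

The exchange only ever modifies the 16 bits of interest, leaving the
neighboring halfword untouched, which is the shape qspinlock needs for
its 16-bit tail exchange within the 32-bit lock word.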
Qspinlock has better code size and performance on the fast path.
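For context, the uncontended qspinlock fast path is a single acquire
compare-and-swap of the whole lock word. A toy userspace analogue (the
names toy_qspinlock, TOY_LOCKED, and toy_lock_fastpath are illustrative,
not the kernel's API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Toy model of the qspinlock fast path: taking an uncontended lock is
 * one acquire cmpxchg of the 32-bit lock word from 0 to "locked". */
#define TOY_LOCKED 1u

struct toy_qspinlock { _Atomic uint32_t val; };

static bool toy_lock_fastpath(struct toy_qspinlock *lock)
{
	uint32_t expected = 0;
	/* Success: we own the lock with acquire ordering.
	 * Failure: the kernel would fall back to the MCS-style slow path. */
	return atomic_compare_exchange_strong_explicit(&lock->val,
			&expected, TOY_LOCKED,
			memory_order_acquire, memory_order_relaxed);
}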
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
Diffstat (limited to 'arch/csky/include/asm/cmpxchg.h')
-rw-r--r-- | arch/csky/include/asm/cmpxchg.h | 20
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index 5b8faccd65e4..5f693fadb56c 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -15,6 +15,26 @@ extern void __bad_xchg(void);
 	__typeof__(*(ptr)) __ret;				\
 	unsigned long tmp;					\
 	switch (size) {						\
+	case 2: {						\
+		u32 ret;					\
+		u32 shif = ((ulong)__ptr & 2) ? 16 : 0;		\
+		u32 mask = 0xffff << shif;			\
+		__ptr = (__typeof__(ptr))((ulong)__ptr & ~2);	\
+		__asm__ __volatile__ (				\
+		"1:	ldex.w		%0, (%4)\n"		\
+		"	and		%1, %0, %2\n"		\
+		"	or		%1, %1, %3\n"		\
+		"	stex.w		%1, (%4)\n"		\
+		"	bez		%1, 1b\n"		\
+		: "=&r" (ret), "=&r" (tmp)			\
+		: "r" (~mask),					\
+		  "r" ((u32)__new << shif),			\
+		  "r" (__ptr)					\
+		: "memory");					\
+		__ret = (__typeof__(*(ptr)))			\
+			((ret & mask) >> shif);			\
+		break;						\
+	}							\
 	case 4:							\
 		asm volatile (					\
 		"1:	ldex.w		%0, (%3) \n"		\
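Reading the added case-2 path: %0 is ret (the loaded 32-bit word), %1 is
tmp (the spliced word, which stex.w reuses as its success flag), %2 is
~mask, %3 is the new halfword already shifted into position, and %4 is the
word-aligned pointer. "bez %1, 1b" retries the ldex/stex sequence whenever
the store-exclusive fails.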