Diffstat (limited to 'arch/mips/include/asm/barrier.h')
-rw-r--r--   arch/mips/include/asm/barrier.h   55
1 file changed, 55 insertions, 0 deletions
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index a5eb1bb199a7..b865e317a14f 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -105,6 +105,20 @@
  */
 #define STYPE_SYNC_MB 0x10
 
+/*
+ * stype 0x14 - A completion barrier specific to global invalidations
+ *
+ * When a sync instruction of this type completes any preceding GINVI or GINVT
+ * operation has been globalized & completed on all coherent CPUs. Anything
+ * that the GINV* instruction should invalidate will have been invalidated on
+ * all coherent CPUs when this instruction completes. It is implementation
+ * specific whether the GINV* instructions themselves will ensure completion,
+ * or this sync type will.
+ *
+ * In systems implementing global invalidates (ie. with Config5.GI == 2 or 3)
+ * this sync type also requires that previous SYNCI operations have completed.
+ */
+#define STYPE_GINV	0x14
 
 #ifdef CONFIG_CPU_HAS_SYNC
 #define __sync()				\
@@ -222,6 +236,47 @@
 #define __smp_mb__before_atomic()	__smp_mb__before_llsc()
 #define __smp_mb__after_atomic()	smp_llsc_mb()
 
+/*
+ * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
+ * store or pref) in between an ll & sc can cause the sc instruction to
+ * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
+ * containing such sequences, this bug bites harder than we might otherwise
+ * expect due to reordering & speculation:
+ *
+ * 1) A memory access appearing prior to the ll in program order may actually
+ *    be executed after the ll - this is the reordering case.
+ *
+ *    In order to avoid this we need to place a memory barrier (ie. a sync
+ *    instruction) prior to every ll instruction, in between it & any earlier
+ *    memory access instructions. Many of these cases are already covered by
+ *    smp_mb__before_llsc() but for the remaining cases, typically ones in
+ *    which multiple CPUs may operate on a memory location but ordering is not
+ *    usually guaranteed, we use loongson_llsc_mb() below.
+ *
+ *    This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
+ *
+ * 2) If a conditional branch exists between an ll & sc with a target outside
+ *    of the ll-sc loop, for example an exit upon value mismatch in cmpxchg()
+ *    or similar, then misprediction of the branch may allow speculative
+ *    execution of memory accesses from outside of the ll-sc loop.
+ *
+ *    In order to avoid this we need a memory barrier (ie. a sync instruction)
+ *    at each affected branch target, for which we also use loongson_llsc_mb()
+ *    defined below.
+ *
+ *    This case affects all current Loongson 3 CPUs.
+ */
+#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
+#define loongson_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#else
+#define loongson_llsc_mb()	do { } while (0)
+#endif
+
+static inline void sync_ginv(void)
+{
+	asm volatile("sync\t%0" :: "i"(STYPE_GINV));
+}
+
 #include <asm-generic/barrier.h>
 
 #endif /* __ASM_BARRIER_H */
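
The comment block in the second hunk gives two placement rules for loongson_llsc_mb(). As a rough illustration, here is a minimal sketch assuming a simplified atomic-add style ll/sc loop; the function name, operand constraints and asm layout are inventions for the example and are not part of this patch (the real call sites are the atomic, cmpxchg and bitops implementations under arch/mips/include/asm/).

/*
 * Illustrative sketch only: where loongson_llsc_mb() is expected to sit
 * around an ll/sc sequence.  Everything here apart from the barrier call
 * itself is an assumption made for the example.
 */
static inline void example_atomic_add(int i, int *counter)
{
	int temp;

	/* Case 1: order earlier memory accesses ahead of the ll. */
	loongson_llsc_mb();

	__asm__ __volatile__(
	"1:	ll	%0, %1		\n"	/* load-linked the current value */
	"	addu	%0, %2		\n"	/* compute the new value */
	"	sc	%0, %1		\n"	/* attempt the store-conditional */
	"	beqz	%0, 1b		\n"	/* sc failed: retry the loop */
	: "=&r" (temp), "+m" (*counter)
	: "Ir" (i));
}

For sequences with a branch out of the loop (case 2), such as the value-mismatch exit in cmpxchg(), the same barrier would additionally be placed at the branch target outside the ll-sc loop.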
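
In the same spirit, here is a minimal sketch of pairing a global invalidate with the new sync type. The example_ginvt_full() wrapper, its ".set ginv" directive and the GINVT type value are assumptions made for illustration; only sync_ginv() and STYPE_GINV come from this patch.

/*
 * Illustrative sketch only: issue a global TLB invalidate, then use the
 * new sync type to wait for it to complete on all coherent CPUs.  The
 * ginvt wrapper below is a hypothetical stand-in, not defined here.
 */
static inline void example_ginvt_full(void)
{
	asm volatile(
	"	.set	push		\n"
	"	.set	ginv		\n"
	"	ginvt	$0, 0		\n"	/* assumed: type 0 == invalidate all entries */
	"	.set	pop		\n"
	: /* no outputs */
	:
	: "memory");
}

static inline void example_flush_tlb_all(void)
{
	example_ginvt_full();	/* broadcast the invalidate to all coherent CPUs */
	sync_ginv();		/* sync 0x14: wait until it has completed everywhere */
}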