Diffstat (limited to 'arch/powerpc/include/asm/spinlock.h')
-rw-r--r--	arch/powerpc/include/asm/spinlock.h	62
1 file changed, 45 insertions, 17 deletions
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index a47f827bc5f1..e9a960e28f3c 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -101,15 +101,43 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 #if defined(CONFIG_PPC_SPLPAR)
 /* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
-extern void __spin_yield(arch_spinlock_t *lock);
-extern void __rw_yield(arch_rwlock_t *lock);
+void splpar_spin_yield(arch_spinlock_t *lock);
+void splpar_rw_yield(arch_rwlock_t *lock);
 #else /* SPLPAR */
-#define __spin_yield(x)	barrier()
-#define __rw_yield(x)	barrier()
-#define SHARED_PROCESSOR	0
+static inline void splpar_spin_yield(arch_spinlock_t *lock) {};
+static inline void splpar_rw_yield(arch_rwlock_t *lock) {};
 #endif
 
+static inline bool is_shared_processor(void)
+{
+/*
+ * LPPACA is only available on Pseries so guard anything LPPACA related to
+ * allow other platforms (which include this common header) to compile.
+ */
+#ifdef CONFIG_PPC_PSERIES
+	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
+		lppaca_shared_proc(local_paca->lppaca_ptr));
+#else
+	return false;
+#endif
+}
+
+static inline void spin_yield(arch_spinlock_t *lock)
+{
+	if (is_shared_processor())
+		splpar_spin_yield(lock);
+	else
+		barrier();
+}
+
+static inline void rw_yield(arch_rwlock_t *lock)
+{
+	if (is_shared_processor())
+		splpar_rw_yield(lock);
+	else
+		barrier();
+}
+
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	while (1) {
@@ -117,8 +145,8 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 			break;
 		do {
 			HMT_low();
-			if (SHARED_PROCESSOR)
-				__spin_yield(lock);
+			if (is_shared_processor())
+				splpar_spin_yield(lock);
 		} while (unlikely(lock->slock != 0));
 		HMT_medium();
 	}
@@ -136,8 +164,8 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 		local_irq_restore(flags);
 		do {
 			HMT_low();
-			if (SHARED_PROCESSOR)
-				__spin_yield(lock);
+			if (is_shared_processor())
+				splpar_spin_yield(lock);
 		} while (unlikely(lock->slock != 0));
 		HMT_medium();
 		local_irq_restore(flags_dis);
@@ -226,8 +254,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 			break;
 		do {
 			HMT_low();
-			if (SHARED_PROCESSOR)
-				__rw_yield(rw);
+			if (is_shared_processor())
+				splpar_rw_yield(rw);
 		} while (unlikely(rw->lock < 0));
 		HMT_medium();
 	}
@@ -240,8 +268,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 			break;
 		do {
 			HMT_low();
-			if (SHARED_PROCESSOR)
-				__rw_yield(rw);
+			if (is_shared_processor())
+				splpar_rw_yield(rw);
 		} while (unlikely(rw->lock != 0));
 		HMT_medium();
 	}
@@ -281,9 +309,9 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	rw->lock = 0;
 }
 
-#define arch_spin_relax(lock)	__spin_yield(lock)
-#define arch_read_relax(lock)	__rw_yield(lock)
-#define arch_write_relax(lock)	__rw_yield(lock)
+#define arch_spin_relax(lock)	spin_yield(lock)
+#define arch_read_relax(lock)	rw_yield(lock)
+#define arch_write_relax(lock)	rw_yield(lock)
 
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()   smp_mb()
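
For context, the sketch below shows the busy-wait shape that results from this change: the open-coded SHARED_PROCESSOR test becomes a call to is_shared_processor(), and the yield helpers now carry the splpar_ prefix. The wrapper function example_spin_wait() is hypothetical and not part of the patch; only the names it calls come from the diff above.

/*
 * Illustrative only -- a hypothetical caller mirroring the loop in
 * arch_spin_lock() after this patch.  Not code from the commit.
 */
static inline void example_spin_wait(arch_spinlock_t *lock)
{
	while (lock->slock != 0) {
		HMT_low();			/* drop SMT thread priority while spinning */
		if (is_shared_processor())	/* shared-processor LPAR (SPLPAR)? */
			splpar_spin_yield(lock);/* yield to the hypervisor in favour of the lock holder */
		else
			barrier();		/* dedicated CPU: compiler barrier only */
	}
	HMT_medium();				/* restore normal SMT priority */
}

On non-SPLPAR configurations is_shared_processor() is a compile-time false, so the whole yield branch folds away and the loop behaves exactly as the old barrier()-only fallback did.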