Diffstat (limited to 'arch/s390/lib/spinlock.c')
-rw-r--r--	arch/s390/lib/spinlock.c | 25 ++++++++-----------------
1 file changed, 8 insertions(+), 17 deletions(-)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index e5f50a7d2f4e..e48a48ec24bc 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
 	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
 }
 
-static inline int cpu_is_preempted(int cpu)
-{
-	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
-		return 0;
-	if (smp_vcpu_scheduled(cpu))
-		return 0;
-	return 1;
-}
-
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 			continue;
 		}
 		/* First iteration: check if the lock owner is running. */
-		if (first_diag && cpu_is_preempted(~owner)) {
+		if (first_diag && arch_vcpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 			continue;
@@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 		 * yield the CPU unconditionally. For LPAR rely on the
 		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 		}
@@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 			continue;
 		}
 		/* Check if the lock owner is running. */
-		if (first_diag && cpu_is_preempted(~owner)) {
+		if (first_diag && arch_vcpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 			continue;
@@ -127,7 +118,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		 * yield the CPU unconditionally. For LPAR rely on the
 		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 		}
@@ -165,7 +156,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && cpu_is_preempted(~owner))
+			if (owner && arch_vcpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -211,7 +202,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && cpu_is_preempted(~owner))
+			if (owner && arch_vcpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -241,7 +232,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && cpu_is_preempted(~owner))
+			if (owner && arch_vcpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -285,7 +276,7 @@ void arch_lock_relax(unsigned int cpu)
 {
 	if (!cpu)
 		return;
-	if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
+	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
 		return;
 	smp_yield_cpu(~cpu);
 }
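For context: the arch_vcpu_is_preempted() helper that replaces the file-local cpu_is_preempted() is defined outside this file and does not appear in the diff. Below is a minimal sketch of what it could look like, assuming it keeps the logic of the removed helper; the bool return type and the exact definition site are assumptions, not taken from this patch.

/*
 * Hypothetical sketch only: mirrors the logic of the removed
 * cpu_is_preempted() helper. The real arch_vcpu_is_preempted()
 * lives elsewhere in the tree and may differ. Relies on the
 * s390-internal helpers already visible in the removed code.
 */
bool arch_vcpu_is_preempted(int cpu)
{
	/* A CPU sitting in enabled wait is idle, not preempted. */
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	/* A vCPU the hypervisor currently has scheduled is running. */
	if (smp_vcpu_scheduled(cpu))
		return false;
	/* Otherwise the vCPU is not backed by a running physical CPU. */
	return true;
}

Note that every caller in this file passes ~owner or ~cpu, suggesting the lock word stores the bitwise complement of the owner's CPU number, which the callers invert back before asking whether that CPU is preempted.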