Diffstat (limited to 'kernel/locking/osq_lock.c')
-rw-r--r--	kernel/locking/osq_lock.c	23
1 file changed, 10 insertions, 13 deletions
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 6ef600aa0f47..1f7734949ac8 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -134,20 +134,17 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	 * cmpxchg in an attempt to undo our queueing.
 	 */

-	while (!READ_ONCE(node->locked)) {
-		/*
-		 * If we need to reschedule bail... so we can block.
-		 * Use vcpu_is_preempted() to avoid waiting for a preempted
-		 * lock holder:
-		 */
-		if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
-			goto unqueue;
-
-		cpu_relax();
-	}
-	return true;
+	/*
+	 * Wait to acquire the lock or cancelation. Note that need_resched()
+	 * will come with an IPI, which will wake smp_cond_load_relaxed() if it
+	 * is implemented with a monitor-wait. vcpu_is_preempted() relies on
+	 * polling, be careful.
+	 */
+	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
+				  vcpu_is_preempted(node_cpu(node->prev))))
+		return true;

-unqueue:
+	/* unqueue */
 	/*
 	 * Step - A  -- stabilize @prev
 	 *
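For context, here is a minimal sketch of what the generic smp_cond_load_relaxed() fallback looks like, roughly following include/asm-generic/barrier.h (details vary by kernel version, so treat this as an illustration rather than the exact upstream macro). It repeatedly loads *ptr with READ_ONCE(), binds the value to VAL, evaluates the caller-supplied condition, and calls cpu_relax() between iterations; the macro evaluates to the last value loaded. That is why the converted osq_lock() can test the result directly: a non-zero result means node->locked was observed set and the lock is held, while zero means the wait ended because of need_resched() or a preempted lock holder, so the slow unqueue path must run. Architectures such as arm64 can override this with a monitor/wait based implementation, which is what the new comment's remark about the need_resched() IPI refers to.

/*
 * Sketch of the generic smp_cond_load_relaxed() fallback: spin on
 * READ_ONCE() until cond_expr (which may reference the loaded value
 * as VAL) becomes true, relaxing the CPU between loads. The macro
 * expands to a statement expression whose value is the final VAL.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({	\
	typeof(ptr) __PTR = (ptr);			\
	typeof(*ptr) VAL;				\
	for (;;) {					\
		VAL = READ_ONCE(*__PTR);		\
		if (cond_expr)				\
			break;				\
		cpu_relax();				\
	}						\
	VAL;						\
})
#endif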