Diffstat (limited to 'kernel/locking/rtmutex.c')
 -rw-r--r--  kernel/locking/rtmutex.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 0c6a48dfcecb..8555c4efe97c 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1103,8 +1103,11 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 	 * the other will detect the deadlock and return -EDEADLOCK,
 	 * which is wrong, as the other waiter is not in a deadlock
 	 * situation.
+	 *
+	 * Except for ww_mutex, in that case the chain walk must already deal
+	 * with spurious cycles, see the comments at [3] and [6].
 	 */
-	if (owner == task)
+	if (owner == task && !(build_ww_mutex() && ww_ctx))
 		return -EDEADLK;
 
 	raw_spin_lock(&task->pi_lock);
@@ -1379,9 +1382,8 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
 		 *    for CONFIG_PREEMPT_RCU=y)
 		 *  - the VCPU on which owner runs is preempted
 		 */
-		if (!owner->on_cpu || need_resched() ||
-		    rt_mutex_waiter_is_top_waiter(lock, waiter) ||
-		    vcpu_is_preempted(task_cpu(owner))) {
+		if (!owner_on_cpu(owner) || need_resched() ||
+		    !rt_mutex_waiter_is_top_waiter(lock, waiter)) {
 			res = false;
 			break;
 		}
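
The two hunks can be read as two small predicates. The first hunk suppresses the immediate -EDEADLK return when the blocking task is acquiring a ww_mutex with an acquire context, leaving spurious-cycle handling to the chain walk. Below is a minimal standalone sketch (not kernel code) of that decision; the struct names and the build_ww_mutex() stand-in are simplified assumptions used only to illustrate the condition in the diff.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel's task and ww_acquire context. */
    struct task { int id; };
    struct ww_acquire_ctx { int stamp; };

    /* Models build_ww_mutex(): whether ww_mutex is built on top of rtmutex. */
    static bool build_ww_mutex(void)
    {
    	return true;
    }

    /*
     * Blocking on a lock you already own is normally an immediate deadlock,
     * unless this is a ww_mutex acquisition with a context: the chain walk
     * is then expected to sort out spurious cycles, so returning -EDEADLK
     * here would be a false positive.
     */
    static int check_self_deadlock(const struct task *owner,
    			       const struct task *task,
    			       const struct ww_acquire_ctx *ww_ctx)
    {
    	if (owner == task && !(build_ww_mutex() && ww_ctx))
    		return -EDEADLK;
    	return 0;
    }

    int main(void)
    {
    	struct task t = { .id = 1 };
    	struct ww_acquire_ctx ctx = { .stamp = 42 };

    	printf("%d\n", check_self_deadlock(&t, &t, NULL));	/* -EDEADLK */
    	printf("%d\n", check_self_deadlock(&t, &t, &ctx));	/* 0: defer to chain walk */
    	return 0;
    }

The second hunk changes when optimistic spinning on the lock owner stops: it folds the on_cpu and vcpu_is_preempted() checks into owner_on_cpu(), and negates the top-waiter test so the waiter stops spinning once it is *no longer* the top waiter. The following sketch (again standalone, with simplified fields standing in for the kernel's owner->on_cpu and vcpu_is_preempted() checks) models that corrected predicate.

    #include <stdbool.h>
    #include <stdio.h>

    struct owner_state {
    	bool on_cpu;		/* owner currently running on a CPU */
    	bool vcpu_preempted;	/* host preempted the owner's VCPU */
    };

    /* Stand-in for the kernel's owner_on_cpu() helper. */
    static bool owner_on_cpu(const struct owner_state *owner)
    {
    	return owner->on_cpu && !owner->vcpu_preempted;
    }

    /*
     * Keep spinning only while the owner can make progress (it is running
     * on a CPU), we do not need to reschedule, and we are still the top
     * waiter. The diff fixes the last condition: the old code stopped
     * spinning while the waiter *was* the top waiter, which is backwards.
     */
    static bool keep_spinning(const struct owner_state *owner,
    			  bool need_resched, bool is_top_waiter)
    {
    	if (!owner_on_cpu(owner) || need_resched || !is_top_waiter)
    		return false;
    	return true;
    }

    int main(void)
    {
    	struct owner_state running = { .on_cpu = true,  .vcpu_preempted = false };
    	struct owner_state blocked = { .on_cpu = false, .vcpu_preempted = false };

    	printf("%d\n", keep_spinning(&running, false, true));	/* 1: worth spinning */
    	printf("%d\n", keep_spinning(&running, false, false));	/* 0: outranked, block instead */
    	printf("%d\n", keep_spinning(&blocked, false, true));	/* 0: owner cannot release soon */
    	return 0;
    }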