diff options
Diffstat (limited to 'kernel/locking/rtmutex.c')
| -rw-r--r-- | kernel/locking/rtmutex.c | 26 | 
1 file changed, 19 insertions, 7 deletions
| diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 6f3dba6e4e9e..65cc0cb984e6 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1290,6 +1290,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,  	return ret;  } +static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock) +{ +	int ret = try_to_take_rt_mutex(lock, current, NULL); + +	/* +	 * try_to_take_rt_mutex() sets the lock waiters bit +	 * unconditionally. Clean this up. +	 */ +	fixup_rt_mutex_waiters(lock); + +	return ret; +} +  /*   * Slow path try-lock function:   */ @@ -1312,13 +1325,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)  	 */  	raw_spin_lock_irqsave(&lock->wait_lock, flags); -	ret = try_to_take_rt_mutex(lock, current, NULL); - -	/* -	 * try_to_take_rt_mutex() sets the lock waiters bit -	 * unconditionally. Clean this up. -	 */ -	fixup_rt_mutex_waiters(lock); +	ret = __rt_mutex_slowtrylock(lock);  	raw_spin_unlock_irqrestore(&lock->wait_lock, flags); @@ -1505,6 +1512,11 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)  	return rt_mutex_slowtrylock(lock);  } +int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) +{ +	return __rt_mutex_slowtrylock(lock); +} +  /**   * rt_mutex_timed_lock - lock a rt_mutex interruptible   *			the timeout structure is provided |