Diffstat (limited to 'kernel/locking/mutex.c')
-rw-r--r--	kernel/locking/mutex.c	25
1 file changed, 14 insertions, 11 deletions
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index adb935090768..622ebdfcd083 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -626,7 +626,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
  */
 static __always_inline bool
 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
-		      const bool use_ww_ctx, struct mutex_waiter *waiter)
+		      struct mutex_waiter *waiter)
 {
 	if (!waiter) {
 		/*
@@ -702,7 +702,7 @@ fail:
 #else
 static __always_inline bool
 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
-		      const bool use_ww_ctx, struct mutex_waiter *waiter)
+		      struct mutex_waiter *waiter)
 {
 	return false;
 }
@@ -922,6 +922,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct ww_mutex *ww;
 	int ret;

+	if (!use_ww_ctx)
+		ww_ctx = NULL;
+
 	might_sleep();

 #ifdef CONFIG_DEBUG_MUTEXES
@@ -929,7 +932,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 #endif

 	ww = container_of(lock, struct ww_mutex, base);
-	if (use_ww_ctx && ww_ctx) {
+	if (ww_ctx) {
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 			return -EALREADY;

@@ -946,10 +949,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

 	if (__mutex_trylock(lock) ||
-	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
+	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
 		/* got the lock, yay! */
 		lock_acquired(&lock->dep_map, ip);
-		if (use_ww_ctx && ww_ctx)
+		if (ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
 		preempt_enable();
 		return 0;
@@ -960,7 +963,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 * After waiting to acquire the wait_lock, try again.
 	 */
 	if (__mutex_trylock(lock)) {
-		if (use_ww_ctx && ww_ctx)
+		if (ww_ctx)
 			__ww_mutex_check_waiters(lock, ww_ctx);

 		goto skip_wait;
@@ -1013,7 +1016,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			goto err;
 		}

-		if (use_ww_ctx && ww_ctx) {
+		if (ww_ctx) {
 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
 			if (ret)
 				goto err;
@@ -1026,7 +1029,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * ww_mutex needs to always recheck its position since its waiter
 		 * list is not FIFO ordered.
 		 */
-		if ((use_ww_ctx && ww_ctx) || !first) {
+		if (ww_ctx || !first) {
 			first = __mutex_waiter_is_first(lock, &waiter);
 			if (first)
 				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
@@ -1039,7 +1042,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * or we must see its unlock and acquire.
 		 */
 		if (__mutex_trylock(lock) ||
-		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
+		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
 			break;

 		spin_lock(&lock->wait_lock);
@@ -1048,7 +1051,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 acquired:
 	__set_current_state(TASK_RUNNING);

-	if (use_ww_ctx && ww_ctx) {
+	if (ww_ctx) {
 		/*
 		 * Wound-Wait; we stole the lock (!first_waiter), check the
 		 * waiters as anyone might want to wound us.
@@ -1068,7 +1071,7 @@ skip_wait:
 	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);

-	if (use_ww_ctx && ww_ctx)
+	if (ww_ctx)
 		ww_mutex_lock_acquired(ww, ww_ctx);

 	spin_unlock(&lock->wait_lock);
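The whole cleanup rests on one move: __mutex_lock_common() now normalizes ww_ctx to NULL up front when use_ww_ctx is false, so every later "use_ww_ctx && ww_ctx" test collapses to a plain "ww_ctx" check, and the two mutex_optimistic_spin() call sites no longer need to pass the flag at all. Below is a minimal, self-contained userspace sketch of that pattern; the names (lock_common, struct ctx, use_ctx) are illustrative stand-ins, not kernel APIs.

#include <stdio.h>
#include <stddef.h>

struct ctx { int id; };		/* stand-in for struct ww_acquire_ctx */

/*
 * The boolean selecting the context path is a compile-time constant
 * at every inlined call site, so normalizing the pointer once lets
 * all later tests check only the pointer, while constant folding
 * still removes the context path entirely when use_ctx is false.
 */
static inline int lock_common(struct ctx *c, const int use_ctx)
{
	if (!use_ctx)
		c = NULL;	/* was: every site tested "use_ctx && c" */

	if (c)			/* a single pointer test now suffices */
		printf("context path, id=%d\n", c->id);
	else
		printf("plain path\n");
	return 0;
}

int main(void)
{
	struct ctx c = { .id = 1 };

	lock_common(&c, 1);	/* ww-style caller: context honoured */
	lock_common(&c, 0);	/* plain caller: context ignored */
	lock_common(NULL, 1);	/* context-capable caller, no context */
	return 0;
}

Since __mutex_lock_common() is always inlined with a constant use_ww_ctx at each caller, the up-front normalization should cost nothing at runtime: the diff effectively hoists one constant-folded branch to the top instead of repeating the double test at every use.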