| author | Dmitry Torokhov <[email protected]> | 2022-08-02 10:06:12 -0700 |
|---|---|---|
| committer | Dmitry Torokhov <[email protected]> | 2022-08-02 10:06:12 -0700 |
| commit | 8bb5e7f4dcd9b9ef22a3ea25c9066a8a968f12dd (patch) | |
| tree | 0f1383880607a227142f9388a066959926233ff1 /kernel/locking/rtmutex.c | |
| parent | 2a96271fb66c499e4a89d76a89d3d01170c10bef (diff) | |
| parent | 7c744d00990ea999d27f306f6db5ccb61b1304b2 (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 5.20 (or 6.0) merge window.
Diffstat (limited to 'kernel/locking/rtmutex.c')
| -rw-r--r-- | kernel/locking/rtmutex.c | 11 |
1 file changed, 11 insertions, 0 deletions
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 8555c4efe97c..7779ee8abc2a 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -24,6 +24,8 @@
 #include <linux/sched/wake_q.h>
 #include <linux/ww_mutex.h>
 
+#include <trace/events/lock.h>
+
 #include "rtmutex_common.h"
 
 #ifndef WW_RT
@@ -1579,6 +1581,8 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 
 	set_current_state(state);
 
+	trace_contention_begin(lock, LCB_F_RT);
+
 	ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk);
 	if (likely(!ret))
 		ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);
@@ -1601,6 +1605,9 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 * unconditionally. We might have to fix that up.
 	 */
 	fixup_rt_mutex_waiters(lock);
+
+	trace_contention_end(lock, ret);
+
 	return ret;
 }
 
@@ -1683,6 +1690,8 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 	/* Save current state and set state to TASK_RTLOCK_WAIT */
 	current_save_and_set_rtlock_wait_state();
 
+	trace_contention_begin(lock, LCB_F_RT);
+
 	task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK);
 
 	for (;;) {
@@ -1712,6 +1721,8 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 	 */
 	fixup_rt_mutex_waiters(lock);
	debug_rt_mutex_free_waiter(&waiter);
+
+	trace_contention_end(lock, 0);
 }
 
 static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
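The pattern in the hunks above is a simple bracket: trace_contention_begin() fires just before the task blocks in the RT-mutex slow path (with LCB_F_RT identifying the lock type), and trace_contention_end() fires once the lock is acquired or the attempt fails, carrying the slow-path return value. As a rough illustration of how these events can be consumed (not part of this commit), the sketch below enables both tracepoints from userspace via tracefs and streams the formatted output. It assumes tracefs is mounted at /sys/kernel/tracing, a kernel new enough to carry these events, and root privileges.

```c
/*
 * Minimal sketch of a userspace consumer for the contention_begin /
 * contention_end tracepoints -- illustrative only, not kernel code.
 * Assumes tracefs at /sys/kernel/tracing and must run as root.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Enable both halves of the pair: begin carries the LCB_F_* flag
	 * (LCB_F_RT above), end carries the slow-path return value. */
	write_str("/sys/kernel/tracing/events/lock/contention_begin/enable", "1");
	write_str("/sys/kernel/tracing/events/lock/contention_end/enable", "1");

	/* Stream formatted events; each begin/end pair brackets one
	 * blocking episode in the rtmutex slow path. */
	fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("trace_pipe");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}
```

In practice, newer perf versions can aggregate the same event pair for you (see `perf lock contention`), which avoids hand-rolling a trace_pipe reader like the one sketched here.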