author    Namhyung Kim <namhyung@kernel.org>    2022-03-22 11:57:09 -0700
committer Peter Zijlstra <peterz@infradead.org> 2022-04-05 10:24:35 +0200
commit    ee042be16cb455116d0fe99b77c6bc8baf87c8c6 (patch)
tree      d5de58e2f2882f0a433ab90a74d23fbee67aa7be /kernel/locking/mutex.c
parent    locking: Add lock contention tracepoints (diff)
locking: Apply contention tracepoints in the slow path
Adding the lock contention tracepoints in various lock function slow paths. Note that each arch can define spinlock differently, so it is only added to the generic qspinlock for now.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Link: https://lkml.kernel.org/r/20220322185709.141236-3-namhyung@kernel.org
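[Editor's note, not part of the patch] The contention_begin/contention_end tracepoints applied here were declared in the parent commit ("locking: Add lock contention tracepoints"). The sketch below is a hypothetical, minimal kernel module illustrating one way such events could be consumed in-kernel; it assumes the probe arguments mirror the calls in this patch, i.e. (void *lock, unsigned int flags) for begin and (void *lock, int ret) for end, and it resolves the tracepoints by name via for_each_kernel_tracepoint() rather than assuming they are exported to modules. All names in it (module name, counters) are made up for illustration.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical example: count firings of the lock contention tracepoints. */
#include <linux/module.h>
#include <linux/tracepoint.h>
#include <linux/atomic.h>
#include <linux/string.h>

static struct tracepoint *tp_begin, *tp_end;
static atomic64_t nr_begin = ATOMIC64_INIT(0);
static atomic64_t nr_end   = ATOMIC64_INIT(0);

/* First probe argument is the private data pointer passed at registration;
 * the remaining arguments are assumed to mirror the tracepoint's TP_PROTO(). */
static void probe_begin(void *data, void *lock, unsigned int flags)
{
        atomic64_inc(&nr_begin);
}

static void probe_end(void *data, void *lock, int ret)
{
        atomic64_inc(&nr_end);
}

/* Called for every kernel tracepoint; remember the two we care about. */
static void find_tp(struct tracepoint *tp, void *priv)
{
        if (!strcmp(tp->name, "contention_begin"))
                tp_begin = tp;
        else if (!strcmp(tp->name, "contention_end"))
                tp_end = tp;
}

static int __init contention_count_init(void)
{
        int ret;

        for_each_kernel_tracepoint(find_tp, NULL);
        if (!tp_begin || !tp_end)
                return -ENODEV;

        ret = tracepoint_probe_register(tp_begin, probe_begin, NULL);
        if (ret)
                return ret;

        ret = tracepoint_probe_register(tp_end, probe_end, NULL);
        if (ret)
                tracepoint_probe_unregister(tp_begin, probe_begin, NULL);

        return ret;
}

static void __exit contention_count_exit(void)
{
        tracepoint_probe_unregister(tp_begin, probe_begin, NULL);
        tracepoint_probe_unregister(tp_end, probe_end, NULL);
        tracepoint_synchronize_unregister();
        pr_info("lock contention events: begin=%lld end=%lld\n",
                (long long)atomic64_read(&nr_begin),
                (long long)atomic64_read(&nr_end));
}

module_init(contention_count_init);
module_exit(contention_count_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Example consumer of the lock contention tracepoints");

Once this patch is applied, the same information should also be visible from userspace as ordinary trace events (expected under the "lock" trace system as contention_begin/contention_end, assuming the event declarations from the parent commit), without any module like the sketch above.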
Diffstat (limited to 'kernel/locking/mutex.c')
-rw-r--r--  kernel/locking/mutex.c  3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index ee2fd7614a93..c88deda77cf2 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -644,6 +644,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
         }
 
         set_current_state(state);
+        trace_contention_begin(lock, 0);
         for (;;) {
                 bool first;
 
@@ -710,6 +711,7 @@ acquired:
 skip_wait:
         /* got the lock - cleanup and rejoice! */
         lock_acquired(&lock->dep_map, ip);
+        trace_contention_end(lock, 0);
 
         if (ww_ctx)
                 ww_mutex_lock_acquired(ww, ww_ctx);
@@ -721,6 +723,7 @@ skip_wait:
 err:
         __set_current_state(TASK_RUNNING);
         __mutex_remove_waiter(lock, &waiter);
+        trace_contention_end(lock, ret);
 err_early_kill:
         raw_spin_unlock(&lock->wait_lock);
         debug_mutex_free_waiter(&waiter);